Dataset schema (column : dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Repository columns, repeated for each prefix in {max_stars, max_issues, max_forks}: `<prefix>_repo_path` (string), `<prefix>_repo_name` (string), `<prefix>_repo_head_hexsha` (string), `<prefix>_repo_licenses` (list), `<prefix>_count` (int64), `<prefix>_repo_{stars|issues|forks}_event_min_datetime` (string), `<prefix>_repo_{stars|issues|forks}_event_max_datetime` (string).

Quality-signal columns: each signal below appears twice, once with a `_quality_signal` suffix and once without. The suffixed variants are float64, except qsc_code_num_words_quality_signal (int64) and qsc_codepython_cate_var_zero_quality_signal (bool). The unsuffixed variants are int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null).

qsc_code_num_words, qsc_code_num_chars, qsc_code_mean_word_length, qsc_code_frac_words_unique, qsc_code_frac_chars_top_2grams, qsc_code_frac_chars_top_3grams, qsc_code_frac_chars_top_4grams, qsc_code_frac_chars_dupe_5grams, qsc_code_frac_chars_dupe_6grams, qsc_code_frac_chars_dupe_7grams, qsc_code_frac_chars_dupe_8grams, qsc_code_frac_chars_dupe_9grams, qsc_code_frac_chars_dupe_10grams, qsc_code_frac_chars_replacement_symbols, qsc_code_frac_chars_digital, qsc_code_frac_chars_whitespace, qsc_code_size_file_byte, qsc_code_num_lines, qsc_code_num_chars_line_max, qsc_code_num_chars_line_mean, qsc_code_frac_chars_alphabet, qsc_code_frac_chars_comments, qsc_code_cate_xml_start, qsc_code_frac_lines_dupe_lines, qsc_code_cate_autogen, qsc_code_frac_lines_long_string, qsc_code_frac_chars_string_length, qsc_code_frac_chars_long_word_length, qsc_code_frac_lines_string_concat, qsc_code_cate_encoded_data, qsc_code_frac_chars_hex_words, qsc_code_frac_lines_prompt_comments, qsc_code_frac_lines_assert, qsc_codepython_cate_ast, qsc_codepython_frac_lines_func_ratio, qsc_codepython_cate_var_zero, qsc_codepython_frac_lines_pass, qsc_codepython_frac_lines_import, qsc_codepython_frac_lines_simplefunc, qsc_codepython_score_lines_no_logic, qsc_codepython_frac_lines_print
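A schema like this is easiest to sanity-check programmatically. Below is a minimal sketch of loading a dump with these columns and filtering on the quality signals using pandas; the file name `rows.parquet` is a hypothetical placeholder for whatever export is at hand:

```python
# Minimal sketch: load rows with the schema above and apply a simple
# quality filter. "rows.parquet" is a hypothetical export path.
import pandas as pd

df = pd.read_parquet("rows.parquet")

# keep parseable Python files that are not dominated by duplicated 5-grams
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]])
```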

Record 1
hexsha: bcc019e1e7277f852d55bb225dc74bb333185aa3 | size: 660 | ext: py | lang: Python
max_stars / max_issues / max_forks: tests/test_buffers.py @ romanchyla/CSPatterns, head d9627297aabce1ab648f4a4cdbe9882527add138, licenses ["MIT"]; all counts and event datetimes null
content:
from cspatterns.datastructures import buffer
def test_circular_buffer():
    b = buffer.CircularBuffer(2, ['n'])
    assert len(b.next) == 2
    assert b.n is None

    b = buffer.CircularBuffer.create(2, attrs=['n', 'fib'])
    curr = b
    out = [0, 1, ]
    curr.prev[-2].n = 0
    curr.prev[-2].fib = 1
    curr.prev[-1].n = 1
    curr.prev[-1].fib = 1

    # we are going to calculate fibonacci
    while curr.prev[-1].n < 12:
        curr.n = curr.prev[-1].n + 1
        curr.fib = curr.prev[-1].fib + curr.prev[-2].fib
        out.append(curr.fib)
        curr = curr.next[1]

    assert out == [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
avg_line_length: 25.384615 | max_line_length: 66 | alphanum_fraction: 0.551515
quality signals (qsc_code_* / qsc_codepython_*, _quality_signal suffix): num_words 109, num_chars 660, mean_word_length 3.321101, frac_words_unique 0.40367, frac_chars_top_2grams 0.176796, frac_chars_top_3grams 0.124309, frac_chars_top_4grams 0.082873, frac_chars_dupe_5grams 0.082873, frac_chars_dupe_6grams 0.082873, frac_chars_digital 0.089583, frac_chars_whitespace 0.272727, size_file_byte 660, num_lines 26, num_chars_line_max 66, num_chars_line_mean 25.384615, frac_chars_alphabet 0.664583, frac_chars_comments 0.05303, frac_chars_string_length 0.008013, frac_lines_assert 0.166667, cate_ast 1, frac_lines_func_ratio 0.055556, cate_var_zero false, frac_lines_import 0.055556, score_lines_no_logic 0.111111; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
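The test above drives the Fibonacci recurrence through a two-slot circular buffer. The same sliding-window idea, with the identical seed values and termination condition, can be expressed with collections.deque; this is a sketch of the technique, not of the cspatterns API:

```python
# Sliding-window Fibonacci as in the test above, using a deque of maxlen 2
# instead of the CircularBuffer's prev[-1]/prev[-2] links.
from collections import deque

window = deque([(0, 1), (1, 1)], maxlen=2)  # (n, fib) seeds, as in the test
out = [0, 1]
while window[-1][0] < 12:
    n = window[-1][0] + 1
    fib = window[-1][1] + window[-2][1]
    out.append(fib)
    window.append((n, fib))
assert out == [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
```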

Record 2
hexsha: bcc1d17c27a82c381571bf91c586033e374ec7d9 | size: 1,741 | ext: py | lang: Python
max_stars: code_examples/plotting_data/hexbin.py @ ezcitron/BasemapTutorial, head 0db9248b430d39518bdfdb25d713145be4eb966a, licenses ["CC0-1.0"], count 99, events 2015-01-14T21:20:48.000Z to 2022-01-25T10:38:37.000Z
max_issues: same path/repo/head/licenses, count 1, events 2017-08-31T07:02:20.000Z to 2017-08-31T07:02:20.000Z
max_forks: same path/repo/head/licenses, count 68, events 2015-01-14T21:21:01.000Z to 2022-01-29T14:53:38.000Z
content:
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import array
from numpy import max
map = Basemap(llcrnrlon=-0.5,llcrnrlat=39.8,urcrnrlon=4.,urcrnrlat=43.,
              resolution='i', projection='tmerc', lat_0 = 39.5, lon_0 = 1)
map.readshapefile('../sample_files/lightnings', 'lightnings')
x = []
y = []
c = []
for info, lightning in zip(map.lightnings_info, map.lightnings):
    x.append(lightning[0])
    y.append(lightning[1])
    if float(info['amplitude']) < 0:
        c.append(-1 * float(info['amplitude']))
    else:
        c.append(float(info['amplitude']))
plt.figure(0)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y))
map.colorbar(location='bottom')
plt.figure(1)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', bins='log')
map.colorbar(location='bottom', format='%.1f', label='log(# lightnings)')
plt.figure(2)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', norm=colors.LogNorm())
cb = map.colorbar(location='bottom', format='%d', label='# lightnings')
cb.set_ticks([1, 5, 10, 15, 20, 25, 30])
cb.set_ticklabels([1, 5, 10, 15, 20, 25, 30])
plt.figure(3)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), C = array(c), reduce_C_function = max, gridsize=20, mincnt=1, cmap='YlOrBr', linewidths=0.5, edgecolors='k')
map.colorbar(location='bottom', label='Mean amplitude (kA)')
plt.show()
avg_line_length: 23.527027 | max_line_length: 139 | alphanum_fraction: 0.687536
quality signals: num_words 246, num_chars 1,741, mean_word_length 4.813008, frac_words_unique 0.353659, frac_chars_top_2grams 0.067568, frac_chars_top_3grams 0.092905, frac_chars_top_4grams 0.11402, frac_chars_dupe_5grams 0.416385, frac_chars_dupe_6grams 0.346284, frac_chars_dupe_7grams 0.346284, frac_chars_dupe_8grams 0.326014, frac_chars_dupe_9grams 0.326014, frac_chars_dupe_10grams 0.326014, frac_chars_digital 0.038058, frac_chars_whitespace 0.124641, size_file_byte 1,741, num_lines 74, num_chars_line_max 140, num_chars_line_mean 23.527027, frac_chars_alphabet 0.738845, frac_lines_dupe_lines 0.195122, frac_chars_string_length 0.175086, frac_chars_long_word_length 0.072331, cate_ast 1, cate_var_zero false, frac_lines_import 0.121951, score_lines_no_logic 0.121951; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
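Basemap's hexbin and colorbar simply forward to matplotlib's, so the log-normalized density variant (plt.figure(2) in the script above) can be reproduced without any map data. A self-contained sketch with synthetic points instead of the lightning shapefile:

```python
# Self-contained version of the log-normalized hexbin (cf. plt.figure(2)
# above), with synthetic points instead of the lightning shapefile.
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
x, y = rng.normal(size=10000), rng.normal(size=10000)

fig, ax = plt.subplots()
hb = ax.hexbin(x, y, gridsize=20, mincnt=1, cmap='summer', norm=colors.LogNorm())
fig.colorbar(hb, ax=ax, location='bottom', format='%d', label='# points')
plt.show()
```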

Record 3
hexsha: bcc3dcd13da8bfacff9f3f45c797b5dd285e8744 | size: 4,031 | ext: py | lang: Python
max_stars: src/extractors/emojiextractor.py @ chmduquesne/rofimoji, head 9abdc0a8db1b166bb30da994c4aadb7baf91df2d, licenses ["MIT"], count 574, events 2017-10-29T18:04:31.000Z to 2022-03-30T23:34:34.000Z
max_issues: same path/repo/head/licenses, count 104, events 2017-11-02T08:24:29.000Z to 2022-03-29T02:39:58.000Z
max_forks: same path/repo/head/licenses, count 53, events 2017-11-01T22:38:02.000Z to 2022-02-14T09:20:36.000Z
content:
import html
from collections import namedtuple
from pathlib import Path
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
Emoji = namedtuple('Emoji', 'char name')
class EmojiExtractor(object):

    def __init__(self):
        self.all_emojis = self.fetch_emoji_list()
        self.annotations = self.fetch_annotations()
        self.base_emojis = self.fetch_base_emojis()

    def fetch_emoji_list(self: 'EmojiExtractor') -> List[Emoji]:
        print('Downloading list of all emojis')
        data = requests.get(
            'https://unicode.org/emoji/charts-14.0/full-emoji-list.html',
            timeout=120
        )  # type: requests.Response
        html = BeautifulSoup(data.text, 'lxml')
        emojis = []
        for row in html.find('table').find_all('tr'):
            if not row.th:
                emoji = row.find('td', {'class': 'chars'}).string
                description = row.find('td', {'class': 'name'}).string.replace('⊛ ', '')
                emojis.append(Emoji(emoji, description))
        return emojis

    def fetch_annotations(self: 'EmojiExtractor') -> Dict[chr, List[str]]:
        print('Downloading annotations')
        data = requests.get(
            'https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml',
            timeout=60
        )  # type: requests.Response
        xpath = XPath('./annotations/annotation[not(@type="tts")]')
        return {element.get('cp'): element.text.split(' | ')
                for element in xpath(etree.fromstring(data.content))}

    def fetch_base_emojis(self: 'EmojiExtractor') -> List[chr]:
        print('Downloading list of human emojis...')
        data = requests.get(
            'https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt',
            timeout=60
        )  # type: requests.Response
        started = False
        emojis = []
        for line in data.text.split('\n'):
            if not started and line != '# All omitted code points have Emoji_Modifier_Base=No ':
                continue
            started = True
            if line == '# Total elements: 132':
                break
            if line and not line.startswith('#'):
                emojis.extend(self.resolve_character_range(line.split(';')[0].strip()))
        return emojis

    def resolve_character_range(self, line: str) -> List[str]:
        try:
            (start, end) = line.split('..')
            return [chr(char) for char in range(int(start, 16), int(end, 16) + 1)]
        except ValueError:
            return [self.resolve_character(line)]

    def resolve_character(self, string: str) -> str:
        return "".join(chr(int(character, 16)) for character in string.split(' '))

    def write_symbol_file(self: 'EmojiExtractor'):
        print('Writing collected emojis to symbol file')
        with Path('../picker/data/emojis.csv').open('w') as symbol_file:
            for entry in self.compile_entries(self.all_emojis):
                symbol_file.write(entry + "\n")

    def compile_entries(self: 'EmojiExtractor', emojis: List[Emoji]) -> List[str]:
        annotated_emojis = []
        for emoji in emojis:
            entry = f"{emoji.char} {html.escape(emoji.name)}"
            if emoji.char in self.annotations:
                entry += f" <small>({html.escape(', '.join([annotation for annotation in self.annotations[emoji.char] if annotation != emoji.name]))})</small>"
            annotated_emojis.append(entry)
        return annotated_emojis

    def write_metadata_file(self: 'EmojiExtractor'):
        print('Writing metadata to metadata file')
        with Path('../picker/copyme.py').open('w') as metadata_file:
            metadata_file.write('skin_tone_selectable_emojis={\'')
            metadata_file.write('\', \''.join(self.base_emojis))
            metadata_file.write('\'}\n')

    def extract(self: 'EmojiExtractor'):
        self.write_symbol_file()
        self.write_metadata_file()
avg_line_length: 37.324074 | max_line_length: 159 | alphanum_fraction: 0.607045
quality signals: num_words 473, num_chars 4,031, mean_word_length 5.071882, frac_words_unique 0.30444, frac_chars_top_2grams 0.052522, frac_chars_top_3grams 0.018758, frac_chars_top_4grams 0.02501, frac_chars_dupe_5grams 0.082534, frac_chars_dupe_6grams 0.030013, frac_chars_dupe_7grams 0.030013, frac_chars_digital 0.008696, frac_chars_whitespace 0.258249, size_file_byte 4,031, num_lines 107, num_chars_line_max 160, num_chars_line_mean 37.672897, frac_chars_alphabet 0.793311, frac_chars_comments 0.017614, frac_lines_dupe_lines 0.107143, frac_lines_long_string 0.047619, frac_chars_string_length 0.223711, frac_chars_long_word_length 0.055106, cate_ast 1, frac_lines_func_ratio 0.119048, cate_var_zero false, frac_lines_import 0.095238, frac_lines_simplefunc 0.011905, score_lines_no_logic 0.309524, frac_lines_print 0.059524; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
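The only non-obvious logic in the extractor above is the range/sequence parsing in resolve_character_range and resolve_character. A quick standalone check of the two input shapes it handles (a hex code-point range and a space-separated code-point sequence):

```python
# Standalone check of the two input shapes handled by
# resolve_character_range / resolve_character in the extractor above.
def resolve_character(string: str) -> str:
    return "".join(chr(int(c, 16)) for c in string.split(' '))

def resolve_character_range(line: str) -> list:
    try:
        start, end = line.split('..')
        return [chr(cp) for cp in range(int(start, 16), int(end, 16) + 1)]
    except ValueError:  # no '..' present: a single code point or sequence
        return [resolve_character(line)]

print(resolve_character_range('1F600..1F603'))          # ['😀', '😁', '😂', '😃']
print(resolve_character_range('1F468 200D 2695 FE0F'))  # one ZWJ sequence
```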

Record 4
hexsha: bcc4fcfb44a442a2523238a8484bf80417464006 | size: 5,084 | ext: py | lang: Python
max_stars: tests/integration_tests/security/test_seccomp.py @ gregbdunn/firecracker, head e7bc0a1f9b70deaa7bfd9eb641e0c7982fe63e68, licenses ["Apache-2.0"], count 2, events 2018-12-20T05:40:43.000Z to 2018-12-20T05:59:58.000Z
max_issues: same path/repo/head/licenses; count and event datetimes null
max_forks: same path/repo/head/licenses, count 1, events 2018-11-27T08:50:51.000Z to 2018-11-27T08:50:51.000Z
content:
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that the seccomp filters don't let blacklisted syscalls through."""
import os
from subprocess import run
import pytest
import host_tools.cargo_build as host # pylint:disable=import-error
@pytest.fixture
def tmp_basic_jailer(test_session_root_path):
"""Build `demo_basic_jailer`, required for the basic seccomp tests.
:return: The paths of the built binary.
"""
binaries_srcdir = os.path.normpath(
os.path.join(
os.getcwd(),
'integration_tests/security/demo_advanced_seccomp/'
)
)
build_path = os.path.join(
test_session_root_path,
host.CARGO_RELEASE_REL_PATH
)
run("cd {} && CARGO_TARGET_DIR={} cargo build --release".format(
binaries_srcdir, build_path), shell=True, check=True)
release_binaries_path = os.path.join(
host.CARGO_RELEASE_REL_PATH,
host.RELEASE_BINARIES_REL_PATH
)
release_binaries_path = os.path.join(
test_session_root_path,
release_binaries_path
)
demo_basic_jailer = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_basic_jailer'
)
)
yield demo_basic_jailer
os.remove(demo_basic_jailer)
@pytest.fixture
def tmp_advanced_seccomp_binaries(test_session_root_path):
"""
Build binaries required for the advanced seccomp tests.
Build `demo_advanced_jailer`, `demo_harmless_firecracker`, and
`demo_malicious_firecracker.
:return: The paths of the built binaries.
"""
binaries_srcdir = os.path.normpath(
os.path.join(
os.getcwd(),
'integration_tests/security/demo_advanced_seccomp/'
)
)
build_path = os.path.join(
test_session_root_path,
host.CARGO_RELEASE_REL_PATH
)
run("cd {} && CARGO_TARGET_DIR={} cargo build --release".format(
binaries_srcdir, build_path), shell=True, check=True)
release_binaries_path = os.path.join(
host.CARGO_RELEASE_REL_PATH,
host.RELEASE_BINARIES_REL_PATH
)
release_binaries_path = os.path.join(
test_session_root_path,
release_binaries_path
)
demo_advanced_jailer = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_advanced_jailer'
)
)
demo_harmless_firecracker = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_harmless_firecracker'
)
)
demo_malicious_firecracker = os.path.normpath(
os.path.join(
release_binaries_path,
'demo_malicious_firecracker'
)
)
yield \
demo_advanced_jailer, \
demo_harmless_firecracker, \
demo_malicious_firecracker
os.remove(demo_advanced_jailer)
os.remove(demo_harmless_firecracker)
os.remove(demo_malicious_firecracker)
def test_seccomp_ls(tmp_basic_jailer):
"""Assert that the seccomp filters deny a blacklisted syscall."""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
# Path to the `ls` binary, which attempts to execute `SYS_access`,
# blacklisted for Firecracker.
ls_command_path = '/bin/ls'
demo_jailer = tmp_basic_jailer
assert os.path.exists(demo_jailer)
# Compile the mini jailer.
outcome = run([demo_jailer, ls_command_path])
# The seccomp filters should send SIGSYS (31) to the binary. `ls` doesn't
# handle it, so it will exit with error.
assert outcome.returncode != 0
def test_advanced_seccomp_harmless(tmp_advanced_seccomp_binaries):
"""
Test `demo_harmless_firecracker`.
Test that the built demo jailer allows the built demo harmless firecracker.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
demo_advanced_jailer, demo_harmless_firecracker, _ =\
tmp_advanced_seccomp_binaries
assert os.path.exists(demo_advanced_jailer)
assert os.path.exists(demo_harmless_firecracker)
outcome = run([demo_advanced_jailer, demo_harmless_firecracker])
# The demo harmless firecracker should have terminated gracefully.
assert outcome.returncode == 0
def test_advanced_seccomp_malicious(tmp_advanced_seccomp_binaries):
"""
Test `demo_malicious_firecracker`.
Test that the built demo jailer denies the built demo malicious
firecracker.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
demo_advanced_jailer, _, demo_malicious_firecracker =\
tmp_advanced_seccomp_binaries
assert os.path.exists(demo_advanced_jailer)
assert os.path.exists(demo_malicious_firecracker)
outcome = run([demo_advanced_jailer, demo_malicious_firecracker])
# The demo malicious firecracker should have received `SIGSYS`.
assert outcome.returncode != 0
avg_line_length: 29.387283 | max_line_length: 79 | alphanum_fraction: 0.696302
quality signals: num_words 625, num_chars 5,084, mean_word_length 5.3712, frac_words_unique 0.2, frac_chars_top_2grams 0.041108, frac_chars_top_3grams 0.035746, frac_chars_top_4grams 0.045874, frac_chars_dupe_5grams 0.65922, frac_chars_dupe_6grams 0.637176, frac_chars_dupe_7grams 0.54513, frac_chars_dupe_8grams 0.473339, frac_chars_dupe_9grams 0.445934, frac_chars_dupe_10grams 0.445934, frac_chars_digital 0.002795, frac_chars_whitespace 0.225806, size_file_byte 5,084, num_lines 172, num_chars_line_max 80, num_chars_line_mean 29.55814, frac_chars_alphabet 0.850102, frac_chars_comments 0.285995, frac_lines_dupe_lines 0.455446, frac_chars_string_length 0.083215, frac_chars_long_word_length 0.042318, frac_lines_assert 0.079208, cate_ast 1, frac_lines_func_ratio 0.049505, cate_var_zero false, frac_lines_import 0.039604, score_lines_no_logic 0.089109; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
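The assertions above only require a nonzero exit status. When seccomp kills the child with SIGSYS, subprocess reports the negated signal number in returncode, so a stricter check is possible. A sketch of that distinction (not part of the Firecracker suite; the target binary is a placeholder):

```python
# Sketch: distinguish "exited with error" from "killed by SIGSYS".
# subprocess encodes death-by-signal as a negative returncode.
import signal
from subprocess import run

outcome = run(['/bin/false'])        # placeholder for the jailed binary
if outcome.returncode < 0:
    killed_by = signal.Signals(-outcome.returncode)
    print('killed by', killed_by.name)       # SIGSYS for a seccomp trap
else:
    print('exited with status', outcome.returncode)
```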

Record 5
hexsha: bcc6795e9da5c859c6308d7dfd37a7f5806dbb41 | size: 3,714 | ext: py | lang: Python
max_stars: webapp/gen_graphs.py @ bfitzy2142/NET4901-SP, head 908c13332a5356bd6a59879b8d78af76432b807c, licenses ["MIT"], count 3, events 2019-08-04T03:09:02.000Z to 2020-06-08T15:48:36.000Z
max_issues: same path/repo/head/licenses, count 3, events 2019-09-06T08:30:21.000Z to 2020-06-30T03:24:56.000Z
max_forks: webapp/gen_graphs.py @ bfitzy2142/NET4901-SP-SDLENS, same head/licenses; count and event datetimes null
content:
#!/usr/bin/env python3
"""
@author: Sam Cook
MySql Parser for graphical presentation
"""
import mysql.connector
import datetime
from mysql.connector import Error
from datetime import datetime, timedelta
import json
class sql_graph_info():

    def __init__(self, node, interface, time, sql_creds, db):
        """
        Initializer for the sql_graph_info Object.
        """
        self.node = node
        self.interface = interface
        self.time = time
        self.sql_creds = sql_creds
        self.db = db

    def db_pull(self, node, interface, time, ):
        """ Pulls the RX and TX information from the database
            to display for the graphs page.

        Arguments:
            node [str] -- The node that holds the interface which
            is to be presented.
            interface [str] -- The interface in which the counter
            information will be based off of.
            time [str] -- Time ranging from 30 minutes to 10 Years

        Returns:
            dict -- containing arrays of the counter values at
            their corresponding timestamp.
        """
        data_end = datetime.now()
        if time == '1':
            data_start = datetime.now() - timedelta(hours=0, minutes=30)
        elif time == '2':
            data_start = datetime.now() - timedelta(hours=1)
        elif time == '3':
            data_start = datetime.now() - timedelta(hours=2)
        elif time == '4':
            data_start = datetime.now() - timedelta(hours=6)
        elif time == '5':
            data_start = datetime.now() - timedelta(days=1)
        else:
            data_start = datetime.now() - timedelta(days=3650)
        data_end.strftime('%Y-%m-%d %H:%M:%S')
        data_start.strftime('%Y-%m-%d %H:%M:%S')
        node_st = "openflow" + node
        # trailing spaces keep the concatenated SQL fragments valid
        query = (
            f"SELECT timestamp, Rx_pckts, Tx_pckts, Rx_drops, Tx_drops "
            f"FROM {node_st}_counters WHERE "
            f"Interface='openflow:{node}:{interface}' "
            f"AND timestamp >= '{data_start}' "
            f"AND timestamp < '{data_end}'"
        )
        mydb = mysql.connector.connect(
            host=self.sql_creds['host'],
            user=self.sql_creds['user'],
            passwd=self.sql_creds['password'],
            database=self.db
        )
        cur = mydb.cursor()
        cur.execute(query)
        response = cur.fetchall()
        graphPoints = []
        displayPoints = []
        dataPointDict = {}
        for dataPoint in response:
            date = str(dataPoint[0])
            rx_count = int(dataPoint[1])
            tx_count = int(dataPoint[2])
            rx_drops = int(dataPoint[3])
            tx_drops = int(dataPoint[4])
            if dataPointDict:
                old_rx_c = int(dataPointDict['rx_count'])
                old_tx_c = int(dataPointDict["tx_count"])
                old_rx_d = int(dataPointDict["rx_drops"])
                old_tx_d = int(dataPointDict["tx_drops"])
                dif_rx_c = rx_count - old_rx_c
                dif_tx_c = tx_count - old_tx_c
                dif_rx_d = rx_drops - old_rx_d
                dif_tx_d = tx_drops - old_tx_d
                difDict = {"date": date, "rx_count": dif_rx_c,
                           "tx_count": dif_tx_c,
                           "rx_drops": dif_rx_d,
                           "tx_drops": dif_tx_d}
                displayPoints.append(difDict)
            dataPointDict = {"date": date, "rx_count": rx_count,
                             "tx_count": tx_count, "rx_drops": rx_drops,
                             "tx_drops": tx_drops}
            graphPoints.append(dataPointDict)
        return displayPoints
avg_line_length: 34.71028 | max_line_length: 72 | alphanum_fraction: 0.53608
quality signals: num_words 430, num_chars 3,714, mean_word_length 4.423256, frac_words_unique 0.3, frac_chars_top_2grams 0.037855, frac_chars_top_3grams 0.053628, frac_chars_top_4grams 0.063091, frac_chars_dupe_5grams 0.120925, frac_chars_dupe_6grams 0.120925, frac_chars_dupe_7grams 0.014721, frac_chars_digital 0.010915, frac_chars_whitespace 0.358643, size_file_byte 3,714, num_lines 106, num_chars_line_max 73, num_chars_line_mean 35.037736, frac_chars_alphabet 0.787573, frac_chars_comments 0.159666, frac_chars_string_length 0.117529, frac_chars_long_word_length 0.013022, cate_ast 1, frac_lines_func_ratio 0.027397, cate_var_zero false, frac_lines_pass 0.013699, frac_lines_import 0.068493, score_lines_no_logic 0.123288; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
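db_pull above builds its SQL by interpolating node, interface, and both timestamps into an f-string. The value parameters can instead be bound by mysql.connector; only the table name, which cannot be bound, still needs validating. A sketch against the same cursor and variables:

```python
# Sketch: bind values instead of interpolating them into the SQL string.
# Identifiers (the table name) cannot be bound, so validate node first.
table = "openflow{}_counters".format(int(node))   # rejects non-numeric nodes
query = (
    "SELECT timestamp, Rx_pckts, Tx_pckts, Rx_drops, Tx_drops "
    "FROM {} WHERE Interface = %s "
    "AND timestamp >= %s AND timestamp < %s"
).format(table)
cur.execute(query, (f"openflow:{node}:{interface}", data_start, data_end))
```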

Record 6
hexsha: bcca1a19ecd367ba4725d3ef774b347cae61be62 | size: 830 | ext: py | lang: Python
max_stars / max_issues / max_forks: scqubits/tests/test_fluxqubit.py @ dmtvanzanten/scqubits, head d4d8a0f71ac91077594a6173348279aa490ed048, licenses ["BSD-3-Clause"]; all counts and event datetimes null
content:
# test_fluxqubit.py
# meant to be run with 'pytest'
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import numpy as np
from scqubits import FluxQubit
from scqubits.tests.conftest import StandardTests
class TestFluxQubit(StandardTests):
    @classmethod
    def setup_class(cls):
        cls.qbt = None
        cls.qbt_type = FluxQubit
        cls.file_str = "fluxqubit"
        cls.op1_str = "n_1_operator"
        cls.op2_str = "n_2_operator"
        cls.param_name = "flux"
        cls.param_list = np.linspace(0.45, 0.55, 50)
avg_line_length: 28.62069 | max_line_length: 76 | alphanum_fraction: 0.622892
quality signals: num_words 110, num_chars 830, mean_word_length 4.590909, frac_words_unique 0.663636, frac_chars_top_2grams 0.039604, frac_chars_digital 0.024578, frac_chars_whitespace 0.215663, size_file_byte 830, num_lines 28, num_chars_line_max 77, num_chars_line_mean 29.642857, frac_chars_alphabet 0.751152, frac_chars_comments 0.360241, frac_chars_string_length 0.083333, cate_ast 1, frac_lines_func_ratio 0.076923, cate_var_zero false, frac_lines_import 0.230769, score_lines_no_logic 0.384615; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
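The only numerics in the test above are the sweep parameters: 50 flux values spanning 0.45 to 0.55, i.e. centered on the flux qubit's sweet spot at half a flux quantum (flux = 0.5). A quick check of the grid:

```python
# The param_list above: an evenly spaced flux sweep centered on 0.5.
import numpy as np

pts = np.linspace(0.45, 0.55, 50)
print(pts[0], pts[-1], len(pts))        # 0.45 0.55 50
print(abs(pts.mean() - 0.5) < 1e-12)    # symmetric about the sweet spot
```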

Record 7
hexsha: bcca9310b776373045a4dd0e28575a2063a3d591 | size: 1,379 | ext: py | lang: Python
max_stars: PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py @ ckamtsikis/cmssw, head ea19fe642bb7537cbf58451dcf73aa5fd1b66250, licenses ["Apache-2.0"], count 852, events 2015-01-11T21:03:51.000Z to 2022-03-25T21:14:00.000Z
max_issues: same path/repo/head/licenses, count 30,371, events 2015-01-02T00:14:40.000Z to 2022-03-31T23:26:05.000Z
max_forks: same path/repo/head/licenses, count 3,240, events 2015-01-02T05:53:18.000Z to 2022-03-31T17:24:21.000Z
content:
import FWCore.ParameterSet.Config as cms
patPFParticles = cms.EDProducer("PATPFParticleProducer",
    # General configurables
    pfCandidateSource = cms.InputTag("noJet"),

    # MC matching configurables
    addGenMatch = cms.bool(False),
    genParticleMatch = cms.InputTag(""),  ## particles source to be used for the MC matching
                                          ## must be an InputTag or VInputTag to a product of
                                          ## type edm::Association<reco::GenParticleCollection>
    embedGenMatch = cms.bool(False),      ## embed gen match inside the object instead of storing the ref

    # add user data
    userData = cms.PSet(
        # add custom classes here
        userClasses = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add doubles here
        userFloats = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add ints here
        userInts = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add candidate ptrs here
        userCands = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add "inline" functions here
        userFunctions = cms.vstring(),
        userFunctionLabels = cms.vstring()
    ),

    # Efficiencies
    addEfficiencies = cms.bool(False),
    efficiencies = cms.PSet(),

    # resolution
    addResolutions = cms.bool(False),
    resolutions = cms.PSet(),
)
avg_line_length: 29.340426 | max_line_length: 106 | alphanum_fraction: 0.585207
quality signals: num_words 135, num_chars 1,379, mean_word_length 5.977778, frac_words_unique 0.555556, frac_chars_top_2grams 0.060719, frac_chars_top_3grams 0.05948, frac_chars_top_4grams 0.064436, frac_chars_dupe_5grams 0.123916, frac_chars_dupe_6grams 0.123916, frac_chars_whitespace 0.315446, size_file_byte 1,379, num_lines 46, num_chars_line_max 107, num_chars_line_mean 29.978261, frac_chars_alphabet 0.854873, frac_chars_comments 0.29079, frac_lines_dupe_lines 0.333333, frac_chars_string_length 0.027168, frac_chars_long_word_length 0.021944, cate_ast 1, cate_var_zero false, frac_lines_import 0.037037, score_lines_no_logic 0.037037; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
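A *_cfi.py module like the one above defines a prototype producer; downstream CMSSW configurations usually import it and clone() it with overrides rather than editing the file. A sketch of typical use (the input tag here is illustrative):

```python
# Sketch: import the prototype above and clone it with overridden
# parameters, the usual way a *_cfi module is consumed in CMSSW.
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.producersLayer1.pfParticleProducer_cfi import patPFParticles

myPFParticles = patPFParticles.clone(
    pfCandidateSource = cms.InputTag("particleFlow"),  # illustrative source
    addGenMatch = cms.bool(True),
)
```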

Record 8
hexsha: bcccafa97336dc1ded4587f29664425a01e6d815 | size: 28,365 | ext: py | lang: Python
max_stars: python/GafferArnold/ArnoldTextureBake.py @ medubelko/gaffer, head 12c5994c21dcfb8b13b5b86efbcecdcb29202b33, licenses ["BSD-3-Clause"], count 1, events 2019-12-02T02:31:25.000Z to 2019-12-02T02:31:25.000Z
max_issues / max_forks: same path/repo/head/licenses; counts and event datetimes null
content:
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreScene
import Gaffer
import GafferScene
import GafferArnold
import GafferDispatch
import GafferImage
import imath
import inspect
class ArnoldTextureBake( GafferDispatch.TaskNode ) :

    class __CameraSetup( GafferScene.FilteredSceneProcessor ) :

        def __init__( self, name = "__CameraSetup" ) :

            GafferScene.FilteredSceneProcessor.__init__( self, name )

            # Public plugs
            self["cameraGroup"] = Gaffer.StringPlug( "cameraGroup", Gaffer.Plug.Direction.In, "__TEXTUREBAKE_CAMERAS" )
            self["bakeDirectory"] = Gaffer.StringPlug( "bakeDirectory", Gaffer.Plug.Direction.In, "" )
            self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", Gaffer.Plug.Direction.In, "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr" )
            self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", Gaffer.Plug.Direction.In, 512 )
            self["uvSet"] = Gaffer.StringPlug( "uvSet", Gaffer.Plug.Direction.In, "uv" )
            self["udims"] = Gaffer.StringPlug( "udims", Gaffer.Plug.Direction.In, "" )
            self["normalOffset"] = Gaffer.FloatPlug( "normalOffset", Gaffer.Plug.Direction.In, 0.1 )
            self["aovs"] = Gaffer.StringPlug( "aovs", Gaffer.Plug.Direction.In, "beauty:rgba" )
            self["tasks"] = Gaffer.IntPlug( "tasks", Gaffer.Plug.Direction.In, 1 )
            self["taskIndex"] = Gaffer.IntPlug( "taskIndex", Gaffer.Plug.Direction.In, 0 )

            # Output
            self["renderFileList"] = Gaffer.StringVectorDataPlug( "renderFileList", Gaffer.Plug.Direction.Out, defaultValue = IECore.StringVectorData() )
            self["renderFileList"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

            # Private internal network
            self["__udimQuery"] = GafferScene.UDIMQuery()
            self["__udimQuery"]["in"].setInput( self["in"] )
            self["__udimQuery"]["uvSet"].setInput( self["uvSet"] )
            self["__udimQuery"]["attributes"].setValue( "bake:resolution bake:fileName" )
            self["__udimQuery"]["filter"].setInput( self["filter"] )

            self["__chunkedBakeInfo"] = Gaffer.CompoundObjectPlug( "__chunkedBakeInfo", Gaffer.Plug.Direction.In, IECore.CompoundObject() )
            self["__chunkedBakeInfo"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

            self["__chunkExpression"] = Gaffer.Expression()
            self["__chunkExpression"].setExpression( inspect.cleandoc(
                """
                import collections
                import re

                rawInfo = parent["__udimQuery"]["out"]

                defaultFileName = parent["defaultFileName"]
                defaultResolution = parent["defaultResolution"]
                selectUdimsStr = parent["udims"]

                # FrameList really ought to take care of this check, instead of just doing
                # something obviously wrong
                if re.match( ".*[0-9] +[0-9].*", selectUdimsStr ):
                    raise RuntimeError( "ArnoldTextureBake : Udim list must be comma separated." )
                selectUdims = set( IECore.FrameList.parse( selectUdimsStr ).asList() )

                allMeshes = collections.defaultdict( lambda : [] )
                for udim, meshes in rawInfo.items():
                    if selectUdims and not int( udim ) in selectUdims:
                        continue
                    for mesh, extraAttributes in meshes.items():
                        resolution = defaultResolution
                        if "bake:resolution" in extraAttributes:
                            resolution = extraAttributes["bake:resolution"].value
                        fileName = defaultFileName
                        if "bake:fileName" in extraAttributes:
                            fileName = extraAttributes["bake:fileName"].value
                        allMeshes[ (fileName, udim) ].append( { "mesh" : mesh, "resolution" : resolution } )

                fileList = sorted( allMeshes.keys() )

                info = IECore.CompoundObject()
                numTasks = min( parent["tasks"], len( fileList ) )
                taskIndex = parent["taskIndex"]
                if taskIndex < numTasks:
                    chunkStart = ( taskIndex * len( fileList ) ) / numTasks
                    chunkEnd = ( ( taskIndex + 1 ) * len( fileList ) ) / numTasks
                    dupeCount = 0
                    prevFileName = ""
                    for fileNameTemplate, udim in fileList[chunkStart:chunkEnd]:
                        for meshData in allMeshes[(fileNameTemplate, udim)]:
                            o = IECore.CompoundObject()
                            o["mesh"] = IECore.StringData( meshData["mesh"] )
                            o["udim"] = IECore.IntData( int( udim ) )
                            o["resolution"] = IECore.IntData( meshData["resolution"] )
                            udimStr = str( udim )
                            fileName = fileNameTemplate.replace( "<UDIM>", udimStr )
                            if fileName == prevFileName:
                                dupeCount += 1
                                fileName = fileName + ".layer" + str( dupeCount )
                            else:
                                prevFileName = fileName
                                dupeCount = 0
                            o["fileName"] = IECore.StringData( fileName )
                            name = o["mesh"].value.replace( "/", "_" ) + "." + udimStr
                            info[ name ] = o
                parent["__chunkedBakeInfo"] = info

                fileList = []
                for name, i in info.items():
                    fileName = i["fileName"].value
                    for nameAndAov in parent["aovs"].strip( " " ).split( " " ):
                        fileList.append( i["fileName"].value.replace( "<AOV>", nameAndAov.split(":")[0] ) )
                parent["renderFileList"] = IECore.StringVectorData( fileList )
                """
            ), "python" )

            self["__parent"] = GafferScene.Parent()
            self["__parent"]["parent"].setValue( "/" )
            for c in ['bound', 'transform', 'attributes', 'object', 'childNames', 'setNames', 'set']:
                self["__parent"]["in"][c].setInput( self["in"][c] )

            self["__outputExpression"] = Gaffer.Expression()
            self["__outputExpression"].setExpression( inspect.cleandoc(
                """
                import IECoreScene

                # Transfer all input globals except for outputs
                inGlobals = parent["in"]["globals"]
                outGlobals = IECore.CompoundObject()
                for key, value in inGlobals.items():
                    if not key.startswith( "output:" ):
                        outGlobals[key] = value

                # Make our own outputs
                info = parent["__chunkedBakeInfo"]
                for cameraName, i in info.items():
                    params = IECore.CompoundData()
                    fileName = i["fileName"].value
                    params["camera"] = IECore.StringData( "/" + parent["cameraGroup"] + "/" + cameraName )
                    for nameAndAov in parent["aovs"].strip( " " ).split( " " ):
                        tokens = nameAndAov.split( ":" )
                        if len( tokens ) != 2:
                            raise RuntimeError( "Invalid bake aov specification: %s It should contain a : between name and data." )
                        ( aovName, aov ) = tokens
                        aovFileName = fileName.replace( "<AOV>", aovName )
                        outGlobals["output:" + cameraName + "." + aov] = IECoreScene.Output( aovFileName, "exr", aov + " RGBA", params )
                parent["__parent"]["in"]["globals"] = outGlobals
                """
            ), "python" )

            self["__camera"] = GafferScene.Camera()
            self["__camera"]["projection"].setValue( "orthographic" )

            self["__cameraTweaks"] = GafferScene.CameraTweaks()
            self["__cameraTweaks"]["in"].setInput( self["__camera"]["out"] )
            self["__cameraTweaks"]["tweaks"]["projection"] = GafferScene.TweakPlug( "projection", "uv_camera" )
            self["__cameraTweaks"]["tweaks"]["resolution"] = GafferScene.TweakPlug( "resolution", imath.V2i( 0 ) )
            self["__cameraTweaks"]["tweaks"]["u_offset"] = GafferScene.TweakPlug( "u_offset", 0.0 )
            self["__cameraTweaks"]["tweaks"]["v_offset"] = GafferScene.TweakPlug( "v_offset", 0.0 )
            self["__cameraTweaks"]["tweaks"]["mesh"] = GafferScene.TweakPlug( "mesh", "" )
            self["__cameraTweaks"]["tweaks"]["uv_set"] = GafferScene.TweakPlug( "uv_set", "" )
            self["__cameraTweaks"]["tweaks"]["extend_edges"] = GafferScene.TweakPlug( "extend_edges", False )
            self["__cameraTweaks"]["tweaks"]["offset"] = GafferScene.TweakPlug( "offset", 0.1 )
            self["__cameraTweaks"]["tweaks"]["offset"]["value"].setInput( self["normalOffset"] )

            self["__cameraTweaksFilter"] = GafferScene.PathFilter()
            self["__cameraTweaksFilter"]["paths"].setValue( IECore.StringVectorData( [ '/camera' ] ) )
            self["__cameraTweaks"]["filter"].setInput( self["__cameraTweaksFilter"]["out"] )

            self["__collectScenes"] = GafferScene.CollectScenes()
            self["__collectScenes"]["sourceRoot"].setValue( "/camera" )
            self["__collectScenes"]["rootNameVariable"].setValue( "collect:cameraName" )
            self["__collectScenes"]["in"].setInput( self["__cameraTweaks"]["out"] )

            self["__group"] = GafferScene.Group()
            self["__group"]["in"][0].setInput( self["__collectScenes"]["out"] )
            self["__group"]["name"].setInput( self["cameraGroup"] )

            self["__parent"]["children"][0].setInput( self["__group"]["out"] )

            self["__collectSceneRootsExpression"] = Gaffer.Expression()
            self["__collectSceneRootsExpression"].setExpression( inspect.cleandoc(
                """
                info = parent["__chunkedBakeInfo"]
                parent["__collectScenes"]["rootNames"] = IECore.StringVectorData( info.keys() )
                """
            ), "python" )

            self["__cameraSetupExpression"] = Gaffer.Expression()
            self["__cameraSetupExpression"].setExpression( inspect.cleandoc(
                """
                cameraName = context["collect:cameraName"]
                info = parent["__chunkedBakeInfo"]
                i = info[cameraName]
                udimOffset = i["udim"].value - 1001
                parent["__cameraTweaks"]["tweaks"]["resolution"]["value"] = imath.V2i( i["resolution"].value )
                parent["__cameraTweaks"]["tweaks"]["u_offset"]["value"] = -( udimOffset % 10 )
                parent["__cameraTweaks"]["tweaks"]["v_offset"]["value"] = -( udimOffset / 10 )
                parent["__cameraTweaks"]["tweaks"]["mesh"]["value"] = i["mesh"].value
                parent["__cameraTweaks"]["tweaks"]["uv_set"]["value"] = parent["uvSet"] if parent["uvSet"] != "uv" else ""
                """
            ), "python" )

            self["out"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
            self["out"].setInput( self["__parent"]["out"] )

    def __init__( self, name = "ArnoldTextureBake" ) :

        GafferDispatch.TaskNode.__init__( self, name )

        self["in"] = GafferScene.ScenePlug()
        self["filter"] = GafferScene.FilterPlug()
        self["bakeDirectory"] = Gaffer.StringPlug( "bakeDirectory", defaultValue = "" )
        self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", defaultValue = "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr" )
        self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", defaultValue = 512 )
        self["uvSet"] = Gaffer.StringPlug( "uvSet", defaultValue = 'uv' )
        self["udims"] = Gaffer.StringPlug( "udims", defaultValue = "" )
        self["normalOffset"] = Gaffer.FloatPlug( "offset", defaultValue = 0.1 )
        self["aovs"] = Gaffer.StringPlug( "aovs", defaultValue = 'beauty:RGBA' )
        self["tasks"] = Gaffer.IntPlug( "tasks", defaultValue = 1 )
        self["cleanupIntermediateFiles"] = Gaffer.BoolPlug( "cleanupIntermediateFiles", defaultValue = True )

        self["applyMedianFilter"] = Gaffer.BoolPlug( "applyMedianFilter", Gaffer.Plug.Direction.In, False )
        self["medianRadius"] = Gaffer.IntPlug( "medianRadius", Gaffer.Plug.Direction.In, 1 )

        # Set up connection to preTasks beforehand
        self["__PreTaskList"] = GafferDispatch.TaskList()
        self["__PreTaskList"]["preTasks"].setInput( self["preTasks"] )
        self["__CleanPreTasks"] = Gaffer.DeleteContextVariables()
        self["__CleanPreTasks"].setup( GafferDispatch.TaskNode.TaskPlug() )
        self["__CleanPreTasks"]["in"].setInput( self["__PreTaskList"]["task"] )
        self["__CleanPreTasks"]["variables"].setValue( "BAKE_WEDGE:index BAKE_WEDGE:value_unused" )

        # First, setup python commands which will dispatch a chunk of a render or image tasks as
        # immediate execution once they reach the farm - this allows us to run multiple tasks in
        # one farm process.
        self["__RenderDispatcher"] = GafferDispatch.PythonCommand()
        self["__RenderDispatcher"]["preTasks"][0].setInput( self["__CleanPreTasks"]["out"] )
        self["__RenderDispatcher"]["command"].setValue( inspect.cleandoc(
            """
            import GafferDispatch
            # We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index,
            # so we might as well print what we're doing
            IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching render task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) )
            d = GafferDispatch.LocalDispatcher()
            d.dispatch( [ self.parent()["__bakeDirectoryContext"] ] )
            """
        ) )
        self["__ImageDispatcher"] = GafferDispatch.PythonCommand()
        self["__ImageDispatcher"]["preTasks"][0].setInput( self["__RenderDispatcher"]["task"] )
        self["__ImageDispatcher"]["command"].setValue( inspect.cleandoc(
            """
            import GafferDispatch
            # We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index,
            # so we might as well print what we're doing
            IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching image task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) )
            d = GafferDispatch.LocalDispatcher()
            d.dispatch( [ self.parent()["__CleanUpSwitch"] ] )
            """
        ) )
        # Connect through the dispatch settings to the render dispatcher
        # ( The image dispatcher runs much quicker, and should be OK using default settings )
        self["__RenderDispatcher"]["dispatcher"].setInput( self["dispatcher"] )

        # Set up variables so the dispatcher knows that the render and image dispatches depend on
        # the file paths ( in case they are varying in a wedge )
        for redispatch in [ self["__RenderDispatcher"], self["__ImageDispatcher"] ]:
            redispatch["variables"].addChild( Gaffer.NameValuePlug( "bakeDirectory", "", "bakeDirectoryVar" ) )
            redispatch["variables"].addChild( Gaffer.NameValuePlug( "defaultFileName", "", "defaultFileNameVar" ) )

        # Connect the variables via an expression so that get expanded ( this also means that
        # if you put #### in a filename you will get per frame tasks, because the hash will depend
        # on frame number )
        self["__DispatchVariableExpression"] = Gaffer.Expression()
        self["__DispatchVariableExpression"].setExpression( inspect.cleandoc(
            """
            parent["__RenderDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"]
            parent["__RenderDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"]
            parent["__ImageDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"]
            parent["__ImageDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"]
            """
        ), "python" )

        # Wedge based on tasks into the overall number of tasks to run. Note that we don't know how
        # much work each task will do until we actually run the render tasks ( this is when scene
        # expansion happens ). Because we must group all tasks that write to the same file into the
        # same task batch, if tasks is a large number, some tasks batches could end up empty
        self["__MainWedge"] = GafferDispatch.Wedge()
        self["__MainWedge"]["preTasks"][0].setInput( self["__ImageDispatcher"]["task"] )
        self["__MainWedge"]["variable"].setValue( "BAKE_WEDGE:value_unused" )
        self["__MainWedge"]["indexVariable"].setValue( "BAKE_WEDGE:index" )
        self["__MainWedge"]["mode"].setValue( 1 )
        self["__MainWedge"]["intMin"].setValue( 1 )
        self["__MainWedge"]["intMax"].setInput( self["tasks"] )

        self["task"].setInput( self["__MainWedge"]["task"] )
        self["task"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

        # Now set up the render tasks. This involves doing the actual rendering, and triggering the
        # output of the file list index file.

        # First get rid of options from the upstream scene that could mess up the bake
        self["__OptionOverrides"] = GafferScene.StandardOptions()
        self["__OptionOverrides"]["in"].setInput( self["in"] )
        self["__OptionOverrides"]["options"]["pixelAspectRatio"]["enabled"].setValue( True )
        self["__OptionOverrides"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
        self["__OptionOverrides"]["options"]["overscan"]["enabled"].setValue( True )
        self["__OptionOverrides"]["options"]["renderCropWindow"]["enabled"].setValue( True )
        self["__OptionOverrides"]["options"]["cameraBlur"]["enabled"].setValue( True )
        self["__OptionOverrides"]["options"]["transformBlur"]["enabled"].setValue( True )
        self["__OptionOverrides"]["options"]["deformationBlur"]["enabled"].setValue( True )

        self["__CameraSetup"] = self.__CameraSetup()
        self["__CameraSetup"]["in"].setInput( self["__OptionOverrides"]["out"] )
        self["__CameraSetup"]["filter"].setInput( self["filter"] )
        self["__CameraSetup"]["defaultFileName"].setInput( self["defaultFileName"] )
        self["__CameraSetup"]["defaultResolution"].setInput( self["defaultResolution"] )
        self["__CameraSetup"]["uvSet"].setInput( self["uvSet"] )
        self["__CameraSetup"]["aovs"].setInput( self["aovs"] )
        self["__CameraSetup"]["normalOffset"].setInput( self["normalOffset"] )
        self["__CameraSetup"]["tasks"].setInput( self["tasks"] )
        self["__CameraSetup"]["udims"].setInput( self["udims"] )

        self["__Expression"] = Gaffer.Expression()
        self["__Expression"].setExpression( 'parent["__CameraSetup"]["taskIndex"] = context.get( "BAKE_WEDGE:index", 0 )', "python" )

        self["__indexFilePath"] = Gaffer.StringPlug()
        self["__indexFilePath"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
        self["__IndexFileExpression"] = Gaffer.Expression()
        self["__IndexFileExpression"].setExpression( inspect.cleandoc(
            """
            import os
            parent["__indexFilePath"] = os.path.join( parent["bakeDirectory"], "BAKE_FILE_INDEX_" +
                str( context.get("BAKE_WEDGE:index", 0 ) ) + ".####.txt" )
            """
        ), "python" )

        self["__outputIndexCommand"] = GafferDispatch.PythonCommand()
        self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "bakeDirectory", Gaffer.StringPlug() ) )
        self["__outputIndexCommand"]["variables"][0]["value"].setInput( self["bakeDirectory"] )
        self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "indexFilePath", Gaffer.StringPlug() ) )
        self["__outputIndexCommand"]["variables"][1]["value"].setInput( self["__indexFilePath"] )
        self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "fileList", Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() ) ) )
        self["__outputIndexCommand"]["variables"][2]["value"].setInput( self["__CameraSetup"]["renderFileList"] )
        self["__outputIndexCommand"]["command"].setValue( inspect.cleandoc(
            """
            import os
            import distutils.dir_util

            # Ensure path exists
            distutils.dir_util.mkpath( variables["bakeDirectory"] )

            f = open( variables["indexFilePath"], "w" )
            f.writelines( [ i + "\\n" for i in sorted( variables["fileList"] ) ] )
            f.close()

            IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Wrote list of bake files for this chunk to " + variables["indexFilePath"] )
            """
        ) )

        self["__arnoldRender"] = GafferArnold.ArnoldRender()
        self["__arnoldRender"]["preTasks"][0].setInput( self["__outputIndexCommand"]["task"] )
        self["__arnoldRender"]["dispatcher"]["immediate"].setValue( True )
        self["__arnoldRender"]["in"].setInput( self["__CameraSetup"]["out"] )

        self["__bakeDirectoryContext"] = GafferDispatch.TaskContextVariables()
        self["__bakeDirectoryContext"]["variables"].addChild( Gaffer.NameValuePlug( "bakeDirectory", Gaffer.StringPlug() ) )
        self["__bakeDirectoryContext"]["variables"][0]["value"].setInput( self["bakeDirectory"] )
        self["__bakeDirectoryContext"]["preTasks"][0].setInput( self["__arnoldRender"]["task"] )

        # Now set up the image tasks. This involves merging all layers for a UDIM, filling in the
        # background, writing out this image, converting it to tx, and optionally deleting all the exrs
        self["__imageList"] = Gaffer.CompoundObjectPlug( "__imageList", defaultValue = IECore.CompoundObject() )
        self["__imageList"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

        self["__ImageReader"] = GafferImage.ImageReader()
        self["__CurInputFileExpression"] = Gaffer.Expression()
        self["__CurInputFileExpression"].setExpression( inspect.cleandoc(
            """
            l = parent["__imageList"]
            outFile = context["wedge:outFile"]
            loopIndex = context[ "loop:index" ]
            parent["__ImageReader"]["fileName"] = l[outFile][ loopIndex ]
            """
        ), "python" )

        # Find the max size of any input file
        self["__SizeLoop"] = Gaffer.LoopComputeNode()
        self["__SizeLoop"].setup( Gaffer.IntPlug() )

        self["__SizeMaxExpression"] = Gaffer.Expression()
        self["__SizeMaxExpression"].setExpression( inspect.cleandoc(
            """
            f = parent["__ImageReader"]["out"]["format"]
            parent["__SizeLoop"]["next"] = max( f.width(), parent["__SizeLoop"]["previous"] )
            """
        ), "python" )

        # Loop over all input files for this output file, and merge them all together
        self["__ImageLoop"] = Gaffer.LoopComputeNode()
        self["__ImageLoop"].setup( GafferImage.ImagePlug() )

        self["__NumInputsForCurOutputExpression"] = Gaffer.Expression()
        self["__NumInputsForCurOutputExpression"].setExpression( inspect.cleandoc(
            """
            l = parent["__imageList"]
            outFile = context["wedge:outFile"]
            numInputs = len( l[outFile] )
            parent["__ImageLoop"]["iterations"] = numInputs
            parent["__SizeLoop"]["iterations"] = numInputs
            """
        ), "python" )

        self["__Resize"] = GafferImage.Resize()
        self["__Resize"]["format"]["displayWindow"]["min"].setValue( imath.V2i( 0, 0 ) )
        self["__Resize"]['format']["displayWindow"]["max"]["x"].setInput( self["__SizeLoop"]["out"] )
        self["__Resize"]['format']["displayWindow"]["max"]["y"].setInput( self["__SizeLoop"]["out"] )
        self["__Resize"]['in'].setInput( self["__ImageReader"]["out"] )

        self["__Merge"] = GafferImage.Merge()
        self["__Merge"]["in"][0].setInput( self["__Resize"]["out"] )
        self["__Merge"]["in"][1].setInput( self["__ImageLoop"]["previous"] )
        self["__Merge"]["operation"].setValue( GafferImage.Merge.Operation.Add )

        self["__ImageLoop"]["next"].setInput( self["__Merge"]["out"] )

        # Write out the combined image, so we can immediately read it back in
        # This is just because we're doing enough image processing that we
        # could saturate the cache, and Gaffer wouldn't know that this is
        # the important result to keep
        self["__ImageIntermediateWriter"] = GafferImage.ImageWriter()
        self["__ImageIntermediateWriter"]["in"].setInput( self["__ImageLoop"]["out"] )

        self["__ImageIntermediateReader"] = GafferImage.ImageReader()

        # Now that we've merged everything together, we can use a BleedFill to fill in the background,
        # so that texture filtering across the edges will pull in colors that are at least reasonable.
        self["__BleedFill"] = GafferImage.BleedFill()
        self["__BleedFill"]["in"].setInput( self["__ImageIntermediateReader"]["out"] )

        self["__Median"] = GafferImage.Median()
        self["__Median"]["in"].setInput( self["__BleedFill"]["out"] )
        self["__Median"]["enabled"].setInput( self["applyMedianFilter"] )
        self["__Median"]["radius"]["x"].setInput( self["medianRadius"] )
        self["__Median"]["radius"]["y"].setInput( self["medianRadius"] )

        # Write out the result
        self["__ImageWriter"] = GafferImage.ImageWriter()
        self["__ImageWriter"]["in"].setInput( self["__Median"]["out"] )
        self["__ImageWriter"]["preTasks"][0].setInput( self["__ImageIntermediateWriter"]["task"] )

        # Convert result to texture
        self["__ConvertCommand"] = GafferDispatch.SystemCommand()
        # We shouldn't need a sub-shell and this prevents S.I.P on the Mac from
        # blocking the dylibs loaded by maketx.
        self["__ConvertCommand"]["shell"].setValue( False )
        self["__ConvertCommand"]["substitutions"].addChild( Gaffer.NameValuePlug( "inFile", IECore.StringData(), "member1" ) )
        self["__ConvertCommand"]["substitutions"].addChild( Gaffer.NameValuePlug( "outFile", IECore.StringData(), "member1" ) )
        self["__ConvertCommand"]["preTasks"][0].setInput( self["__ImageWriter"]["task"] )
        self["__ConvertCommand"]["command"].setValue( 'maketx --wrap clamp {inFile} -o {outFile}' )

        self["__CommandSetupExpression"] = Gaffer.Expression()
        self["__CommandSetupExpression"].setExpression( inspect.cleandoc(
            """
            outFileBase = context["wedge:outFile"]
            intermediateExr = outFileBase + ".intermediate.exr"
            parent["__ImageIntermediateWriter"]["fileName"] = intermediateExr
            parent["__ImageIntermediateReader"]["fileName"] = intermediateExr
            tmpExr = outFileBase + ".tmp.exr"
            parent["__ImageWriter"]["fileName"] = tmpExr
            parent["__ConvertCommand"]["substitutions"]["member1"]["value"] = tmpExr
            parent["__ConvertCommand"]["substitutions"]["member2"]["value"] = outFileBase + ".tx"
            """
        ), "python" )

        self["__ImageWedge"] = GafferDispatch.Wedge()
        self["__ImageWedge"]["preTasks"][0].setInput( self["__ConvertCommand"]["task"] )
        self["__ImageWedge"]["variable"].setValue( 'wedge:outFile' )
        self["__ImageWedge"]["indexVariable"].setValue( 'wedge:outFileIndex' )
        self["__ImageWedge"]["mode"].setValue( int( GafferDispatch.Wedge.Mode.StringList ) )

        self["__CleanUpCommand"] = GafferDispatch.PythonCommand()
        self["__CleanUpCommand"]["preTasks"][0].setInput( self["__ImageWedge"]["task"] )
        self["__CleanUpCommand"]["variables"].addChild( Gaffer.NameValuePlug( "filesToDelete", Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() ), "member1" ) )
        self["__CleanUpCommand"]["command"].setValue( inspect.cleandoc(
            """
            import os

            for tmpFile in variables["filesToDelete"]:
                os.remove( tmpFile )
            """
        ) )

        self["__CleanUpExpression"] = Gaffer.Expression()
        self["__CleanUpExpression"].setExpression( inspect.cleandoc(
            """
            imageList = parent["__imageList"]

            toDelete = []
            for outFileBase, inputExrs in imageList.items():
                tmpExr = outFileBase + ".tmp.exr"
                intermediateExr = outFileBase + ".intermediate.exr"
                toDelete.extend( inputExrs )
                toDelete.append( tmpExr )
                toDelete.append( intermediateExr )
            toDelete.append( parent["__indexFilePath"] )

            parent["__CleanUpCommand"]["variables"]["member1"]["value"] = IECore.StringVectorData( toDelete )
            """
        ), "python" )

        self["__CleanUpSwitch"] = GafferDispatch.TaskSwitch()
        self["__CleanUpSwitch"]["preTasks"][0].setInput( self["__ImageWedge"]["task"] )
        self["__CleanUpSwitch"]["preTasks"][1].setInput( self["__CleanUpCommand"]["task"] )
        self["__CleanUpSwitch"]["index"].setInput( self["cleanupIntermediateFiles"] )

        # Set up the list of input image files to process, and the corresponding list of
        # output files to wedge over
        self["__ImageSetupExpression"] = Gaffer.Expression()
        self["__ImageSetupExpression"].setExpression( inspect.cleandoc(
            """
            f = open( parent["__indexFilePath"], "r" )
            fileList = f.read().splitlines()

            fileDict = {}
            for i in fileList:
                rootName = i.rsplit( ".exr", 1 )[0]
                if rootName in fileDict:
                    fileDict[ rootName ].append( i )
                else:
                    fileDict[ rootName ] = IECore.StringVectorData( [i] )

            parent["__imageList"] = IECore.CompoundObject( fileDict )

            parent["__ImageWedge"]["strings"] = IECore.StringVectorData( fileDict.keys() )
            """
        ), "python" )

IECore.registerRunTimeTyped( ArnoldTextureBake, typeName = "GafferArnold::ArnoldTextureBake" )
avg_line_length: 47.196339 | max_line_length: 175 | alphanum_fraction: 0.692649
quality signals: num_words 2,883, num_chars 28,365, mean_word_length 6.627818, frac_words_unique 0.212973, frac_chars_top_2grams 0.035797, frac_chars_top_3grams 0.013921, frac_chars_top_4grams 0.014287, frac_chars_dupe_5grams 0.211325, frac_chars_dupe_6grams 0.157159, frac_chars_dupe_7grams 0.10001, frac_chars_dupe_8grams 0.066517, frac_chars_dupe_9grams 0.05673, frac_chars_dupe_10grams 0.05673, frac_chars_digital 0.003247, frac_chars_whitespace 0.142323, size_file_byte 28,365, num_lines 600, num_chars_line_max 176, num_chars_line_mean 47.275, frac_chars_alphabet 0.782185, frac_chars_comments 0.136224, frac_lines_dupe_lines 0.069264, frac_chars_string_length 0.360552, frac_chars_long_word_length 0.056976, cate_ast 1, frac_lines_func_ratio 0.008658, cate_var_zero false, frac_lines_import 0.038961, score_lines_no_logic 0.056277; all remaining signals 0
raw qsc_* columns (no suffix): all 0, except frac_words_unique and frac_lines_string_concat (null)
effective: 1 | hits: 0
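The one piece of arithmetic buried in __cameraSetupExpression above is the UDIM decomposition: tile = 1001 + u + 10 * v, whose (u, v) the expression negates to position the orthographic bake camera. A standalone check of that arithmetic:

```python
# Standalone check of the UDIM arithmetic used in __cameraSetupExpression:
# UDIM tile numbers encode (u, v) as 1001 + u + 10 * v, with u in 0..9.
def udim_to_uv(udim: int):
    offset = udim - 1001
    return offset % 10, offset // 10

assert udim_to_uv(1001) == (0, 0)   # first tile
assert udim_to_uv(1011) == (0, 1)   # one row up
print(udim_to_uv(1024))             # (3, 2)
```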

Record 9
hexsha: bccd1fa8fe336f245d1474aeb673c6c021c08a1b | size: 20,598 | ext: py | lang: Python
max_stars / max_issues / max_forks: aea/protocols/generator/common.py @ valory-xyz/agents-aea, head 8f38efa96041b0156ed1ae328178e395dbabf2fc, licenses ["Apache-2.0"]; all counts and event datetimes null
content (truncated in this dump):
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains utility code for generator modules."""
import inspect
import os
import re
import shutil
import subprocess # nosec
import sys
import tempfile
from pathlib import Path
from typing import Tuple
from aea.configurations.base import ProtocolSpecification
from aea.configurations.constants import (
DEFAULT_PROTOCOL_CONFIG_FILE,
PACKAGES,
PROTOCOL_LANGUAGE_JS,
PROTOCOL_LANGUAGE_PYTHON,
)
from aea.configurations.loader import ConfigLoader
from aea.helpers.io import open_file
SPECIFICATION_PRIMITIVE_TYPES = ["pt:bytes", "pt:int", "pt:float", "pt:bool", "pt:str"]
SPECIFICATION_COMPOSITIONAL_TYPES = [
"pt:set",
"pt:list",
"pt:dict",
"pt:union",
"pt:optional",
]
PYTHON_COMPOSITIONAL_TYPES = [
"FrozenSet",
"Tuple",
"Dict",
"Union",
"Optional",
]
MESSAGE_IMPORT = "from aea.protocols.base import Message"
SERIALIZER_IMPORT = "from aea.protocols.base import Serializer"
PATH_TO_PACKAGES = PACKAGES
INIT_FILE_NAME = "__init__.py"
PROTOCOL_YAML_FILE_NAME = DEFAULT_PROTOCOL_CONFIG_FILE
MESSAGE_DOT_PY_FILE_NAME = "message.py"
DIALOGUE_DOT_PY_FILE_NAME = "dialogues.py"
CUSTOM_TYPES_DOT_PY_FILE_NAME = "custom_types.py"
SERIALIZATION_DOT_PY_FILE_NAME = "serialization.py"
PYTHON_TYPE_TO_PROTO_TYPE = {
"bytes": "bytes",
"int": "int32",
"float": "float",
"bool": "bool",
"str": "string",
}
CURRENT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
ISORT_CONFIGURATION_FILE = os.path.join(CURRENT_DIR, "isort.cfg")
ISORT_CLI_ARGS = [
"--settings-path",
ISORT_CONFIGURATION_FILE,
"--quiet",
]
PROTOLINT_CONFIGURATION_FILE_NAME = "protolint.yaml"
PROTOLINT_CONFIGURATION = """lint:
rules:
remove:
- MESSAGE_NAMES_UPPER_CAMEL_CASE
- ENUM_FIELD_NAMES_ZERO_VALUE_END_WITH
- PACKAGE_NAME_LOWER_CASE
- REPEATED_FIELD_NAMES_PLURALIZED
- FIELD_NAMES_LOWER_SNAKE_CASE"""
PROTOLINT_INDENTATION_ERROR_STR = "incorrect indentation style"
PROTOLINT_ERROR_WHITELIST = [PROTOLINT_INDENTATION_ERROR_STR]
def _to_camel_case(text: str) -> str:
"""
Convert a text in snake_case format into the CamelCase format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return "".join(word.title() for word in text.split("_"))
def _camel_case_to_snake_case(text: str) -> str:
"""
Convert a text in CamelCase format into the snake_case format.
:param text: the text to be converted.
    :return: The text in snake_case format.
"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
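# Illustrative example (not part of the original module):
#   _camel_case_to_snake_case("DefaultMessage")  ->  "default_message"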
def _match_brackets(text: str, index_of_open_bracket: int) -> int:
"""
    Return the index of the matching closing bracket for the opening bracket at 'index_of_open_bracket' in the input 'text'.
:param text: the text containing the brackets.
:param index_of_open_bracket: the index of the opening bracket.
:return: the index of the matching closing bracket (if any).
    :raises SyntaxError: if there is no matching closing bracket.
"""
if text[index_of_open_bracket] != "[":
raise SyntaxError(
"Index {} in 'text' is not an open bracket '['. It is {}".format(
index_of_open_bracket,
text[index_of_open_bracket],
)
)
open_bracket_stack = []
for index in range(index_of_open_bracket, len(text)):
if text[index] == "[":
open_bracket_stack.append(text[index])
elif text[index] == "]":
open_bracket_stack.pop()
if not open_bracket_stack:
return index
    raise SyntaxError(
        "No matching closing bracket ']' for the opening bracket '[' at index {}".format(
            index_of_open_bracket
        )
    )
def _has_matched_brackets(text: str) -> bool:
"""
Evaluate whether every opening bracket '[' in the 'text' has a matching closing bracket ']'.
:param text: the text.
    :return: True if every opening bracket has a matching closing bracket, False otherwise.
"""
open_bracket_stack = []
for index, _ in enumerate(text):
if text[index] == "[":
open_bracket_stack.append(index)
elif text[index] == "]":
if len(open_bracket_stack) == 0:
return False
open_bracket_stack.pop()
return len(open_bracket_stack) == 0
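# Illustrative examples (not part of the original module):
#   _has_matched_brackets("pt:list[pt:int]")  ->  True
#   _has_matched_brackets("pt:list[pt:int")   ->  False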
def _get_sub_types_of_compositional_types(compositional_type: str) -> Tuple[str, ...]:
"""
Extract the sub-types of compositional types.
    This method handles both specification types (e.g. pt:set[], pt:dict[]) and Python types (e.g. FrozenSet[], Union[]).
:param compositional_type: the compositional type string whose sub-types are to be extracted.
:return: tuple containing all extracted sub-types.
"""
sub_types_list = list()
for valid_compositional_type in (
SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES
):
if compositional_type.startswith(valid_compositional_type):
inside_string = compositional_type[
compositional_type.index("[") + 1 : compositional_type.rindex("]")
].strip()
while inside_string != "":
do_not_add = False
if inside_string.find(",") == -1: # No comma; this is the last sub-type
provisional_sub_type = inside_string.strip()
if (
provisional_sub_type == "..."
): # The sub-string is ... used for Tuple, e.g. Tuple[int, ...]
do_not_add = True
else:
sub_type = provisional_sub_type
inside_string = ""
else: # There is a comma; this MAY not be the last sub-type
sub_string_until_comma = inside_string[
: inside_string.index(",")
].strip()
if (
sub_string_until_comma.find("[") == -1
): # No open brackets; this is a primitive type and NOT the last sub-type
sub_type = sub_string_until_comma
inside_string = inside_string[
inside_string.index(",") + 1 :
].strip()
else: # There is an open bracket'['; this is a compositional type
try:
closing_bracket_index = _match_brackets(
inside_string, inside_string.index("[")
)
except SyntaxError:
raise SyntaxError(
"Bad formatting. No matching close bracket ']' for the open bracket at {}".format(
inside_string[
: inside_string.index("[") + 1
].strip()
)
)
sub_type = inside_string[: closing_bracket_index + 1].strip()
the_rest_of_inside_string = inside_string[
closing_bracket_index + 1 :
].strip()
if (
the_rest_of_inside_string.find(",") == -1
): # No comma; this is the last sub-type
inside_string = the_rest_of_inside_string.strip()
else: # There is a comma; this is not the last sub-type
inside_string = the_rest_of_inside_string[
the_rest_of_inside_string.index(",") + 1 :
].strip()
if not do_not_add:
sub_types_list.append(sub_type)
return tuple(sub_types_list)
raise SyntaxError(
"{} is not a valid compositional type.".format(compositional_type)
)
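# Illustrative examples (not part of the original module):
#   _get_sub_types_of_compositional_types("pt:dict[pt:str, pt:int]")    ->  ("pt:str", "pt:int")
#   _get_sub_types_of_compositional_types("Union[int, Tuple[str, ...]]") ->  ("int", "Tuple[str, ...]")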
def _union_sub_type_to_protobuf_variable_name(
content_name: str, content_type: str
) -> str:
"""
Given a content of type union, create a variable name for its sub-type for protobuf.
:param content_name: the name of the content
:param content_type: the sub-type of a union type
:return: The variable name
"""
if content_type.startswith("FrozenSet"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "set_of_{}".format(sub_type)
elif content_type.startswith("Tuple"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "list_of_{}".format(sub_type)
elif content_type.startswith("Dict"):
sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]
sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]
expanded_type_str = "dict_of_{}_{}".format(sub_type_1, sub_type_2)
else:
expanded_type_str = content_type
protobuf_variable_name = "{}_type_{}".format(content_name, expanded_type_str)
return protobuf_variable_name
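# Illustrative example (not part of the original module):
#   _union_sub_type_to_protobuf_variable_name("content", "FrozenSet[int]")  ->  "content_type_set_of_int"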
def _python_pt_or_ct_type_to_proto_type(content_type: str) -> str:
"""
Convert a PT or CT from python to their protobuf equivalent.
:param content_type: the python type
:return: The protobuf equivalent
"""
if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys():
proto_type = PYTHON_TYPE_TO_PROTO_TYPE[content_type]
else:
proto_type = content_type
return proto_type
def _includes_custom_type(content_type: str) -> bool:
"""
Evaluate whether a content type is a custom type or has a custom type as a sub-type.
:param content_type: the content type
:return: Boolean result
"""
if content_type.startswith("Optional"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
result = _includes_custom_type(sub_type)
elif content_type.startswith("Union"):
sub_types = _get_sub_types_of_compositional_types(content_type)
result = False
for sub_type in sub_types:
if _includes_custom_type(sub_type):
result = True
break
elif (
content_type.startswith("FrozenSet")
or content_type.startswith("Tuple")
or content_type.startswith("Dict")
or content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys()
):
result = False
else:
result = True
return result
def is_installed(programme: str) -> bool:
"""
Check whether a programme is installed on the system.
:param programme: the name of the programme.
:return: True if installed, False otherwise
"""
res = shutil.which(programme)
return res is not None
def base_protolint_command() -> str:
"""
Return the base protolint command.
:return: The base protolint command
"""
if sys.platform.startswith("win"):
protolint_base_cmd = "protolint" # pragma: nocover
else:
protolint_base_cmd = "PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint"
return protolint_base_cmd
def check_prerequisites() -> None:
"""Check whether a programme is installed on the system."""
# check black code formatter is installed
if not is_installed("black"):
raise FileNotFoundError(
"Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html"
)
# check isort code formatter is installed
if not is_installed("isort"):
raise FileNotFoundError(
"Cannot find isort code formatter! To install, please follow this link: https://pycqa.github.io/isort/#installing-isort"
)
# check protolint code formatter is installed
if subprocess.call(f"{base_protolint_command()} version", shell=True) != 0: # nosec
raise FileNotFoundError(
"Cannot find protolint protocol buffer schema file linter! To install, please follow this link: https://github.com/yoheimuta/protolint."
)
# check protocol buffer compiler is installed
if not is_installed("protoc"):
raise FileNotFoundError(
"Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/"
)
def get_protoc_version() -> str:
"""Get the protoc version used."""
result = subprocess.run( # nosec
["protoc", "--version"], stdout=subprocess.PIPE, check=True
)
result_str = result.stdout.decode("utf-8").strip("\n").strip("\r")
return result_str
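# Illustrative example (the exact output depends on the installed compiler):
#   get_protoc_version()  ->  "libprotoc 3.19.4", for example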
def load_protocol_specification(specification_path: str) -> ProtocolSpecification:
"""
Load a protocol specification.
:param specification_path: path to the protocol specification yaml file.
:return: A ProtocolSpecification object
"""
config_loader = ConfigLoader(
"protocol-specification_schema.json", ProtocolSpecification
)
protocol_spec = config_loader.load_protocol_specification(
open_file(specification_path)
)
return protocol_spec
def _create_protocol_file(
path_to_protocol_package: str, file_name: str, file_content: str
) -> None:
"""
Create a file in the generated protocol package.
:param path_to_protocol_package: path to the file
:param file_name: the name of the file
:param file_content: the content of the file
"""
pathname = os.path.join(path_to_protocol_package, file_name)
with open_file(pathname, "w") as file:
file.write(file_content)
def try_run_black_formatting(path_to_protocol_package: str) -> None:
"""
Run Black code formatting via subprocess.
:param path_to_protocol_package: a path where formatting should be applied.
"""
subprocess.run( # nosec
[sys.executable, "-m", "black", path_to_protocol_package, "--quiet"],
check=True,
)
def try_run_isort_formatting(path_to_protocol_package: str) -> None:
"""
Run Isort code formatting via subprocess.
:param path_to_protocol_package: a path where formatting should be applied.
"""
subprocess.run( # nosec
[sys.executable, "-m", "isort", *ISORT_CLI_ARGS, path_to_protocol_package],
check=True,
)
def try_run_protoc(
path_to_generated_protocol_package: str,
name: str,
language: str = PROTOCOL_LANGUAGE_PYTHON,
) -> None:
"""
Run 'protoc' protocol buffer compiler via subprocess.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:param language: the target language in which to compile the protobuf schema file
"""
    # JS uses commonjs-style imports; for closure-style imports this option would need to be changed
js_commonjs_import_option = (
"import_style=commonjs,binary:" if language == PROTOCOL_LANGUAGE_JS else ""
)
language_part_of_the_command = f"--{language}_out={js_commonjs_import_option}{path_to_generated_protocol_package}"
subprocess.run( # nosec
[
"protoc",
f"-I={path_to_generated_protocol_package}",
language_part_of_the_command,
f"{path_to_generated_protocol_package}/{name}.proto",
],
stderr=subprocess.PIPE,
encoding="utf-8",
check=True,
env=os.environ.copy(),
)
def try_run_protolint(path_to_generated_protocol_package: str, name: str) -> None:
"""
Run 'protolint' linter via subprocess.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
"""
# path to proto file
path_to_proto_file = os.path.join(
path_to_generated_protocol_package,
f"{name}.proto",
)
# Dump protolint configuration into a temporary file
temp_dir = tempfile.mkdtemp()
path_to_configuration_in_tmp_file = Path(
temp_dir, PROTOLINT_CONFIGURATION_FILE_NAME
)
with open_file(path_to_configuration_in_tmp_file, "w") as file:
file.write(PROTOLINT_CONFIGURATION)
# Protolint command
cmd = f'{base_protolint_command()} lint -config_path={path_to_configuration_in_tmp_file} -fix "{path_to_proto_file}"'
# Execute protolint command
subprocess.run( # nosec
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
env=os.environ.copy(),
shell=True,
)
# Delete temporary configuration file
shutil.rmtree(temp_dir) # pragma: no cover
def check_protobuf_using_protoc(
path_to_generated_protocol_package: str, name: str
) -> Tuple[bool, str]:
"""
Check whether a protocol buffer schema file is valid.
    Validation is performed by attempting to compile the schema file: if compilation succeeds the file is valid, otherwise it is invalid.
If valid, return True and a 'protobuf file is valid' message, otherwise return False and the error thrown by the compiler.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:return: Boolean result and an accompanying message
"""
try:
try_run_protoc(path_to_generated_protocol_package, name)
os.remove(os.path.join(path_to_generated_protocol_package, name + "_pb2.py"))
return True, "protobuf file is valid"
except subprocess.CalledProcessError as e:
pattern = name + ".proto:[0-9]+:[0-9]+: "
error_message = re.sub(pattern, "", e.stderr[:-1])
return False, error_message
def compile_protobuf_using_protoc(
path_to_generated_protocol_package: str, name: str, language: str
) -> Tuple[bool, str]:
"""
Compile a protocol buffer schema file using protoc.
If successfully compiled, return True and a success message,
otherwise return False and the error thrown by the compiler.
:param path_to_generated_protocol_package: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:param language: the target language in which to compile the protobuf schema file
:return: Boolean result and an accompanying message
"""
try:
try_run_protoc(path_to_generated_protocol_package, name, language)
return True, "protobuf schema successfully compiled"
except subprocess.CalledProcessError as e:
pattern = name + ".proto:[0-9]+:[0-9]+: "
error_message = re.sub(pattern, "", e.stderr[:-1])
return False, error_message
def apply_protolint(path_to_proto_file: str, name: str) -> Tuple[bool, str]:
"""
Apply protolint linter to a protocol buffer schema file.
If no output, return True and a success message,
otherwise return False and the output shown by the linter
(minus the indentation suggestions which are automatically fixed by protolint).
:param path_to_proto_file: path to the protocol buffer schema file.
:param name: name of the protocol buffer schema file.
:return: Boolean result and an accompanying message
"""
try:
try_run_protolint(path_to_proto_file, name)
return True, "protolint has no output"
except subprocess.CalledProcessError as e:
lines_to_show = []
for line in e.stderr.split("\n"):
to_show = True
for whitelist_error_str in PROTOLINT_ERROR_WHITELIST:
if whitelist_error_str in line:
to_show = False
break
if to_show:
lines_to_show.append(line)
error_message = "\n".join(lines_to_show)
return False, error_message
| 35.636678
| 151
| 0.646373
| 2,533
| 20,598
| 5.006317
| 0.154757
| 0.019399
| 0.030282
| 0.027206
| 0.408485
| 0.332702
| 0.285388
| 0.262676
| 0.216781
| 0.193045
| 0
| 0.003535
| 0.258326
| 20,598
| 577
| 152
| 35.69844
| 0.826537
| 0.289785
| 0
| 0.230321
| 0
| 0.011662
| 0.149911
| 0.041137
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.052478
| 0
| 0.169096
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bccd22c451ca48a6b2b63fe4d46e6f3d5177271f
| 12,177
|
py
|
Python
|
tests/unit/python/foglamp/services/core/api/test_backup_restore.py
|
vaibhav-ScaleDB/FogLAMP
|
445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/services/core/api/test_backup_restore.py
|
vaibhav-ScaleDB/FogLAMP
|
445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/services/core/api/test_backup_restore.py
|
vaibhav-ScaleDB/FogLAMP
|
445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import os
import asyncio
import json
from unittest.mock import MagicMock, patch
from collections import Counter
from aiohttp import web
import pytest
from foglamp.services.core import routes
from foglamp.services.core import connect
from foglamp.plugins.storage.common.backup import Backup
from foglamp.plugins.storage.common.restore import Restore
from foglamp.plugins.storage.common import exceptions
from foglamp.services.core.api import backup_restore
from foglamp.common.storage_client.storage_client import StorageClientAsync
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
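# Helper: wraps a plain value in a (legacy, generator-based) coroutine so mocked
# storage/backup calls can be awaited by the handlers under test.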
@asyncio.coroutine
def mock_coro(*args, **kwargs):
if len(args) > 0:
return args[0]
else:
return ""
@pytest.allure.feature("unit")
@pytest.allure.story("api", "backup")
class TestBackup:
"""Unit test the Backup functionality
"""
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
@pytest.mark.parametrize("input_data, expected", [
(1, "RUNNING"),
(2, "COMPLETED"),
(3, "CANCELED"),
(4, "INTERRUPTED"),
(5, "FAILED"),
(6, "RESTORED"),
(7, "UNKNOWN")
])
def test_get_status(self, input_data, expected):
assert expected == backup_restore._get_status(input_data)
@pytest.mark.parametrize("request_params", [
'',
'?limit=1',
'?skip=1',
'?status=completed',
'?status=failed',
'?status=restored&skip=10',
'?status=running&limit=1',
'?status=canceled&limit=10&skip=0',
'?status=interrupted&limit=&skip=',
'?status=&limit=&skip='
])
async def test_get_backups(self, client, request_params):
storage_client_mock = MagicMock(StorageClientAsync)
response = [{'file_name': '1.dump',
'id': 1, 'type': '1', 'status': '2',
'ts': '2018-02-15 15:18:41.821978+05:30',
'exit_code': '0'}]
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_all_backups', return_value=mock_coro(response)):
resp = await client.get('/foglamp/backup{}'.format(request_params))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert 1 == len(json_response['backups'])
assert Counter({"id", "date", "status"}) == Counter(json_response['backups'][0].keys())
@pytest.mark.parametrize("request_params, response_code, response_message", [
('?limit=invalid', 400, "Limit must be a positive integer"),
('?limit=-1', 400, "Limit must be a positive integer"),
('?skip=invalid', 400, "Skip/Offset must be a positive integer"),
('?skip=-1', 400, "Skip/Offset must be a positive integer"),
('?status=BLA', 400, "'BLA' is not a valid status")
])
async def test_get_backups_bad_data(self, client, request_params, response_code, response_message):
resp = await client.get('/foglamp/backup{}'.format(request_params))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_backups_exceptions(self, client):
with patch.object(connect, 'get_storage_async', return_value=Exception):
resp = await client.get('/foglamp/backup')
assert 500 == resp.status
assert "Internal Server Error" == resp.reason
async def test_create_backup(self, client):
async def mock_create():
return "running_or_failed"
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'create_backup', return_value=mock_create()):
resp = await client.post('/foglamp/backup')
assert 200 == resp.status
assert '{"status": "running_or_failed"}' == await resp.text()
async def test_create_backup_exception(self, client):
with patch.object(connect, 'get_storage_async', return_value=Exception):
with patch.object(Backup, 'create_backup', return_value=Exception):
resp = await client.post('/foglamp/backup')
assert 500 == resp.status
assert "Internal Server Error" == resp.reason
async def test_get_backup_details(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'id': 1, 'file_name': '1.dump', 'ts': '2018-02-15 15:18:41.821978+05:30',
'status': '2', 'type': '1', 'exit_code': '0'}
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', return_value=mock_coro(response)):
resp = await client.get('/foglamp/backup/{}'.format(1))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert 3 == len(json_response)
assert Counter({"id", "date", "status"}) == Counter(json_response.keys())
@pytest.mark.parametrize("input_exception, response_code, response_message", [
(exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
(Exception, 500, "Internal Server Error")
])
async def test_get_backup_details_exceptions(self, client, input_exception, response_code, response_message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', side_effect=input_exception):
resp = await client.get('/foglamp/backup/{}'.format(8))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_backup_details_bad_data(self, client):
resp = await client.get('/foglamp/backup/{}'.format('BLA'))
assert 400 == resp.status
assert "Invalid backup id" == resp.reason
async def test_delete_backup(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'delete_backup', return_value=mock_coro(None)):
resp = await client.delete('/foglamp/backup/{}'.format(1))
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'message': 'Backup deleted successfully'} == json_response
@pytest.mark.parametrize("input_exception, response_code, response_message", [
(exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
(Exception, 500, "Internal Server Error")
])
async def test_delete_backup_exceptions(self, client, input_exception, response_code, response_message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'delete_backup', side_effect=input_exception):
resp = await client.delete('/foglamp/backup/{}'.format(8))
assert response_code == resp.status
assert response_message == resp.reason
async def test_delete_backup_bad_data(self, client):
resp = await client.delete('/foglamp/backup/{}'.format('BLA'))
assert 400 == resp.status
assert "Invalid backup id" == resp.reason
async def test_get_backup_status(self, client):
resp = await client.get('/foglamp/backup/status')
assert 200 == resp.status
result = await resp.text()
json_response = json.loads(result)
assert {'backupStatus': [{'index': 1, 'name': 'RUNNING'},
{'index': 2, 'name': 'COMPLETED'},
{'index': 3, 'name': 'CANCELED'},
{'index': 4, 'name': 'INTERRUPTED'},
{'index': 5, 'name': 'FAILED'},
{'index': 6, 'name': 'RESTORED'}]} == json_response
@pytest.mark.parametrize("input_exception, response_code, response_message", [
(ValueError, 400, "Invalid backup id"),
(exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
(Exception, 500, "Internal Server Error")
])
async def test_get_backup_download_exceptions(self, client, input_exception, response_code, response_message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', side_effect=input_exception):
resp = await client.get('/foglamp/backup/{}/download'.format(8))
assert response_code == resp.status
assert response_message == resp.reason
async def test_get_backup_download(self, client):
storage_client_mock = MagicMock(StorageClientAsync)
response = {'id': 1, 'file_name': '/usr/local/foglamp/data/backup/foglamp.db', 'ts': '2018-02-15 15:18:41',
'status': '2', 'type': '1'}
with patch("aiohttp.web.FileResponse", return_value=web.FileResponse(path=os.path.realpath(__file__))) as file_res:
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Backup, 'get_backup_details', return_value=mock_coro(response)) as patch_backup_detail:
with patch('tarfile.open'):
resp = await client.get('/foglamp/backup/{}/download'.format(1))
assert 200 == resp.status
assert 'OK' == resp.reason
patch_backup_detail.assert_called_once_with(1)
assert 1 == file_res.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("api", "restore")
class TestRestore:
"""Unit test the Restore functionality"""
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
async def test_restore_backup(self, client):
async def mock_restore():
return "running"
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Restore, 'restore_backup', return_value=mock_restore()):
resp = await client.put('/foglamp/backup/{}/restore'.format(1))
assert 200 == resp.status
r = await resp.text()
assert {'status': 'running'} == json.loads(r)
@pytest.mark.parametrize("backup_id, input_exception, code, message", [
(8, exceptions.DoesNotExist, 404, "Backup with 8 does not exist"),
(2, Exception, 500, "Internal Server Error"),
('blah', ValueError, 400, 'Invalid backup id')
])
async def test_restore_backup_exceptions(self, client, backup_id, input_exception, code, message):
storage_client_mock = MagicMock(StorageClientAsync)
with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
with patch.object(Restore, 'restore_backup', side_effect=input_exception):
resp = await client.put('/foglamp/backup/{}/restore'.format(backup_id))
assert code == resp.status
assert message == resp.reason
| 46.834615
| 123
| 0.635214
| 1,398
| 12,177
| 5.334049
| 0.139485
| 0.030173
| 0.046265
| 0.035403
| 0.747351
| 0.686335
| 0.642484
| 0.606812
| 0.525412
| 0.525412
| 0
| 0.021977
| 0.241439
| 12,177
| 259
| 124
| 47.015444
| 0.78532
| 0.016753
| 0
| 0.400922
| 0
| 0
| 0.195283
| 0.030693
| 0
| 0
| 0
| 0
| 0.170507
| 1
| 0.018433
| false
| 0
| 0.064516
| 0
| 0.119816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bccd4ecf3e75810f078465ed5395ba34d886f56a
| 3,690
|
py
|
Python
|
pyemits/core/preprocessing/dimensional_reduction.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | 6
|
2021-10-21T14:13:25.000Z
|
2021-12-26T12:22:51.000Z
|
pyemits/core/preprocessing/dimensional_reduction.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | null | null | null |
pyemits/core/preprocessing/dimensional_reduction.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | null | null | null |
"""
Why dimensionality reduction is needed
The following are typical uses of dimensionality reduction on a data set:
• As the number of data dimensions decreases, the space required to store the data also decreases.
• Low-dimensional data helps reduce computation/training time.
• Some algorithms perform poorly on high-dimensional data; dimensionality reduction can make them usable.
• Dimensionality reduction can resolve multicollinearity by removing redundant features. For example, given
  the two variables "time spent on a treadmill" and "calorie consumption", the two are highly correlated:
  the longer the time spent on the treadmill, the more calories are burned. Storing both therefore adds
  little information, and one of them is enough.
• Dimensionality reduction helps data visualisation. If the dimensionality of the data is very high,
  visualisation becomes quite difficult, while plotting two- or three-dimensional data is very simple.
Common dimensionality reduction techniques:
1. missing value ratio
2. low variance filter
3. high correlation filter
4. random forest
5. backward feature elimination
6. forward feature selection
7. factor analysis
8. principal components analysis
9. independent component analysis
10. ISOMAP
11. t-SNE
12. UMAP
"""
random_state = 0
from enum import Enum
class FeatureSelection(Enum):
@classmethod
def missing_value_ratio(cls, threshold):
return
@classmethod
def low_variance_filter(cls, threshold):
return
@classmethod
def high_correlation_filter(cls, threshold):
return
@classmethod
def random_forest(cls):
from sklearn.ensemble import RandomForestRegressor
RF = RandomForestRegressor()
RF.fit()
RF.feature_importances_
return
@classmethod
def backward_feature_extraction(cls):
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
clf = LinearRegression()
        rfe = RFE(clf, n_features_to_select=10)
rfe = rfe.fit_transform()
return
@classmethod
def forward_feature_extraction(cls):
from sklearn.feature_selection import f_regression
ffs = f_regression()
return
class ProjectionBased(Enum):
@classmethod
def isomap(cls):
from sklearn.manifold import Isomap
        ISOMAP = Isomap(n_neighbors=5, n_components=3, n_jobs=-1)
ISOMAP.fit_transform()
return
@classmethod
def tsne(cls):
from sklearn.manifold import TSNE
tsne = TSNE(n_components=3, n_iter=300)
tsne.fit_transform()
return
@classmethod
def umap(cls):
# install umap
return
class ComponentsFactorsBased(Enum):
@classmethod
def factor_analysis(cls):
from sklearn.decomposition import FactorAnalysis
FA = FactorAnalysis(n_components=3)
FA.fit_transform()
return
@classmethod
def pca(cls):
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
pca.fit_transform()
return
@classmethod
def ica(cls):
from sklearn.decomposition import FastICA
ICA = FastICA(n_components=3)
ICA.fit_transform()
return
@classmethod
def lda(cls, solver='svd', n_components=3):
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
LDA = LinearDiscriminantAnalysis(solver=solver, n_components=n_components)
LDA.fit_transform()
return
| 30.495868
| 217
| 0.714363
| 453
| 3,690
| 5.743929
| 0.403974
| 0.069946
| 0.076864
| 0.066872
| 0.219831
| 0.029208
| 0
| 0
| 0
| 0
| 0
| 0.010186
| 0.228455
| 3,690
| 120
| 218
| 30.75
| 0.902002
| 0.389431
| 0
| 0.356164
| 0
| 0
| 0.001337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178082
| false
| 0
| 0.164384
| 0.054795
| 0.561644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bccf9e77bf6eaccd18d5b5a8053e3859146a0272
| 2,727
|
py
|
Python
|
scrapy/clarinetear/spiders/pagina12.py
|
ramiror/clarinete
|
4ebf37cf9f705e04e2aad15015be12c48fe25fd3
|
[
"BSD-2-Clause"
] | null | null | null |
scrapy/clarinetear/spiders/pagina12.py
|
ramiror/clarinete
|
4ebf37cf9f705e04e2aad15015be12c48fe25fd3
|
[
"BSD-2-Clause"
] | null | null | null |
scrapy/clarinetear/spiders/pagina12.py
|
ramiror/clarinete
|
4ebf37cf9f705e04e2aad15015be12c48fe25fd3
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import datetime
import scrapy
import lxml
from lxml.html.clean import Cleaner
import re
SOURCE = 'Página 12'
LANGUAGE = 'es'
cleaner = Cleaner(allow_tags=['p', 'br', 'b', 'a', 'strong', 'i', 'em'])
class Pagina12Spider(scrapy.Spider):
name = 'pagina12'
allowed_domains = ['www.pagina12.com.ar']
start_urls = ['https://www.pagina12.com.ar/']
def start_requests(self):
url = getattr(self, 'article_url', None)
if url is not None:
yield scrapy.Request(url, callback=self.parse_article, cb_kwargs=dict(url=url))
def parse(self, response):
urls = []
for article in response.css('article'):
link = article.css('a')
url = link.attrib['href']
if not url:
continue
if not url.startswith('http'):
url = 'https://www.pagina12.com.ar' + url
urls.append(url)
maybe_img = article.css('img.show-for-large-only')
obj = {
'title': article.css('.article-title a::text, a .title::text').get(),
'volanta': (article.css('.article-title a .title-prefix::text').get() or '').strip(),
'url': url,
'image': maybe_img.attrib['src'] if maybe_img else None,
'source': SOURCE,
'source_language': LANGUAGE,
}
yield obj
request = scrapy.Request(url, callback=self.parse_article, cb_kwargs=dict(url=url))
yield request
yield {'homepage': urls, 'source': SOURCE}
def parse_article(self, response, url):
html = ''.join(response.xpath('//div[@class="article-main-content article-text "]/p').extract())
if not html:
return
content = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(html))).decode('utf-8')
date = response.css('div.date span::text').get().strip()
date_fragments = re.match(r'^([0-9]{1,2}) de ([a-z]+) de ([0-9]{4})$', date)
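        # the site renders dates like "15 de febrero de 2018"; map Spanish month names to numbers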
months = {
'enero': 1,
'febrero': 2,
'marzo': 3,
'abril': 4,
'mayo': 5,
'junio': 6,
'julio': 7,
'agosto': 8,
'septiembre': 9,
'octubre': 10,
'noviembre': 11,
'diciembre': 12,
}
day = int(date_fragments.group(1))
month = months[date_fragments.group(2)]
year = int(date_fragments.group(3))
hour = 0
minute = 0
date = datetime(year, month, day, hour, minute)
obj = {
'url': url,
'content': content,
'date': date.isoformat()
}
yield obj
| 32.855422
| 104
| 0.521819
| 316
| 2,727
| 4.443038
| 0.417722
| 0.017094
| 0.029915
| 0.034188
| 0.145299
| 0.082621
| 0.082621
| 0.082621
| 0.082621
| 0.082621
| 0
| 0.021727
| 0.324899
| 2,727
| 82
| 105
| 33.256098
| 0.740902
| 0
| 0
| 0.083333
| 0
| 0.013889
| 0.181885
| 0.020902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.069444
| 0
| 0.180556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bccfcca536c98cf3954ec419341b10079911dafc
| 6,978
|
py
|
Python
|
svd.py
|
christyc14/fyp
|
c63e719e383a84eb49ffa0c8bd901bfd4aef5864
|
[
"MIT"
] | null | null | null |
svd.py
|
christyc14/fyp
|
c63e719e383a84eb49ffa0c8bd901bfd4aef5864
|
[
"MIT"
] | null | null | null |
svd.py
|
christyc14/fyp
|
c63e719e383a84eb49ffa0c8bd901bfd4aef5864
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Union
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import pandas as pd
import json
from scipy.sparse.linalg import svds
from scipy.spatial import distance
import os
import streamlit as st
def preprocess_ingredients(ingredients):
processed_ingredients = []
for i in range(len(ingredients)):
processed_ingredient = re.sub(
r"\(([^)]*)\)|(([0-9]\d{0,2}(\.\d{1,3})*(,\d+)?)(%|mg|units))|(<\/?i>)|(\/.+)|(\\.+)|\[([^\]]*)\]",
"",
ingredients[i],
).strip()
if (
processed_ingredient.lower() == "water"
or processed_ingredient.lower() == "aqua"
or processed_ingredient.lower() == "eau"
):
processed_ingredient = "Water"
processed_ingredients.append(processed_ingredient)
return processed_ingredients
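# Illustrative example (not part of the original script):
#   preprocess_ingredients(["Aqua", "Glycerin (5%)"])  ->  ["Water", "Glycerin"]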
@st.experimental_memo
def content_recommender(opt, _item1, _item2, _item3, num_recs, df) -> pd.DataFrame:
content_df = df[df.category == opt]
content_df["ingredients"] = content_df["ingredients"].map(preprocess_ingredients)
mlb = MultiLabelBinarizer()
output = mlb.fit_transform(content_df.ingredients.values)
content_df = content_df.drop(["ingredients"], axis=1)
model = TSNE(n_components=2, learning_rate=200)
tsne_features = model.fit_transform(output)
content_df["X"] = tsne_features[:, 0]
content_df["Y"] = tsne_features[:, 1]
content_df["dist"] = 0.0
item1 = content_df[content_df["product_name"] == _item1]
item2 = content_df[content_df["product_name"] == _item2]
item3 = content_df[content_df["product_name"] == _item3]
p1 = np.array([item1["X"], item1["Y"]]).reshape(1, -1)
p2 = np.array([item2["X"], item2["Y"]]).reshape(1, -1)
p3 = np.array([item3["X"], item3["Y"]]).reshape(1, -1)
for ind, item in content_df.iterrows():
pn = np.array([item.X, item.Y]).reshape(-1, 1)
df.at[ind, "dist"] = min(
distance.chebyshev(p1, pn),
distance.chebyshev(p2, pn),
distance.chebyshev(p3, pn),
)
content_df = content_df[~content_df.product_name.isin([_item1, _item2, _item3])]
content_df = content_df.sort_values("dist")
    return content_df.head(num_recs)
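# Collaborative filtering sketch: build a (user x product) rating matrix from users with
# more than one review, factorise it with truncated SVD, and rank products for `username`
# by the reconstructed (predicted) ratings, excluding products the user already reviewed.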
@st.experimental_memo
def collab_recommender(df_tmp, num_recs, username):
reviews = df_tmp.explode("review_data")
reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
reviews["rating"] = reviews["review_data"].apply(lambda x: x["Rating"])
grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
multi_reviews = reviews[reviews.username.isin(multiple_rating_users)]
products_reviewed_per_user = {u: set() for u in multiple_rating_users}
product_index = dict(zip(df_tmp["url"].values, range(len(df_tmp["url"]))))
username_index = dict(zip(multiple_rating_users, range(len(multiple_rating_users))))
matrix = np.zeros((len(multiple_rating_users), len(df_tmp["url"])))
for user, rating, url in zip(
multi_reviews.username.values,
multi_reviews.rating.values,
multi_reviews.url.values,
):
matrix[username_index[user]][product_index[url]] = rating
products_reviewed_per_user[user].add(url)
ss = StandardScaler()
normatrix = ss.fit_transform(matrix)
print(normatrix)
U, S, V = svds(normatrix)
all_user_predicted_rating = ss.inverse_transform(U @ np.diag(S) @ V)
preds_df = pd.DataFrame(
all_user_predicted_rating, columns=product_index, index=username_index
)
sorted_user_preds = preds_df.loc[username].sort_values(ascending=False)
sorted_user_preds = sorted_user_preds[
~sorted_user_preds.index.isin(products_reviewed_per_user[username])
]
sorted_user_preds = sorted_user_preds.head(num_recs)
# we want those that they haven't already tested
collab_df = pd.merge(
df_tmp,
sorted_user_preds.to_frame(),
left_on="url",
right_index=True,
how="right",
)
collab_df.rename(columns={username: "pred_rating"}, inplace=True)
return collab_df
if __name__ == "__main__":
file_path = os.path.dirname(__file__)
if file_path != "":
os.chdir(file_path)
products: List[Dict[str, Union[str, List[str]]]] = []
# input data into List
with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f:
unique = set()
lines = f.read().splitlines()
df_inter = pd.DataFrame(lines)
df_inter.columns = ["json_element"]
df_inter["json_element"].apply(json.loads)
df = pd.json_normalize(df_inter["json_element"].apply(json.loads))
        # to save myself if I do something dumb and run the scraper without deleting the .jsonlines file
df.drop_duplicates(subset=["url"], inplace=True)
# option: category of product, eg cleanser
categories = set(df.category.values)
# filter data by given option
print("Hello world!")
print("Welcome!")
print(categories)
print("pls enter the category:")
cat = str(input())
display_product_names = df[df.category == cat]
print(display_product_names[["brand", "product_name"]])
print("pls enter your top 3 products indices, separated by a new line")
item1 = int(input())
item2 = int(input())
item3 = int(input())
print("pls enter # of recs:")
num_recs = int(input())
reviews = display_product_names.explode("review_data")
reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
print(multiple_rating_users)
print("pls enter sephora userid, if you don't have one just enter 'none':")
username = str(input())
if username == "none":
print("your ingredients based recommendations are:")
cbf = content_recommender(
cat,
df.product_name.values[item1],
df.product_name.values[item2],
df.product_name.values[item3],
num_recs,
df,
)
print(cbf[["brand", "product_name", "url", "avg_rating"]])
else:
cbf = content_recommender(
cat,
df.product_name.values[item1],
df.product_name.values[item2],
df.product_name.values[item3],
num_recs + 10,
df,
)
cf = collab_recommender(cbf, num_recs, username)
print("your hybrid recommendations are:")
print(cf[["brand", "product_name", "url", "pred_rating"]])
print("thank u for using this service :)")
| 38.131148
| 111
| 0.655775
| 894
| 6,978
| 4.894855
| 0.285235
| 0.045247
| 0.029708
| 0.028793
| 0.208181
| 0.208181
| 0.15585
| 0.134369
| 0.134369
| 0.134369
| 0
| 0.011025
| 0.207079
| 6,978
| 182
| 112
| 38.340659
| 0.779866
| 0.033104
| 0
| 0.139241
| 0
| 0.006329
| 0.126391
| 0.021213
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018987
| false
| 0
| 0.094937
| 0
| 0.132911
| 0.094937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bccff1b3d6077ecdb8e86f1fedd69c5761247393
| 22,448
|
py
|
Python
|
esp32/tools/flasher.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | null | null | null |
esp32/tools/flasher.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | null | null | null |
esp32/tools/flasher.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | 1
|
2019-09-22T01:28:52.000Z
|
2019-09-22T01:28:52.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
"""
Flash the ESP32 (bootloader, partitions table and factory app).
How to call esptool:
python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin'
"""
from esptool import ESP32ROM
import os
import sys
import struct
import sqlite3
import argparse
import subprocess
import threading
import time
import fw_version
import csv
working_threads = {}
macs_db = None
wmacs = {}
DB_MAC_UNUSED = 0
DB_MAC_ERROR = -1
DB_MAC_LOCK = -2
DB_MAC_OK = 1
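# MAC lifecycle in the database: UNUSED (0) -> LOCK (-2) while a board is being flashed,
# then OK (1) on success, or ERROR (-1) if programming/verification fails.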
def open_macs_db(db_filename):
global macs_db
if not os.path.exists(db_filename):
print("MAC addresses database not found")
sys.exit(1)
macs_db = sqlite3.connect(db_filename)
def fetch_MACs(number):
return [x[0].encode('ascii', 'ignore') for x in macs_db.execute("select mac from macs where status = 0 order by rowid asc limit ?", (number,)).fetchall()]
def set_mac_status(mac, wmac, status):
macs_db.execute("update macs set status = ?, last_touch = strftime('%s','now'), wmac = ? where mac = ?", (status, wmac, mac))
macs_db.commit()
def print_exception(e):
print ('Exception: {}, on line {}'.format(e, sys.exc_info()[-1].tb_lineno))
def erase_flash(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_erases = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'Chip erase completed successfully' in nextline:
sys.stdout.write('Board erased OK on port %s\n' % port)
num_erases += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_erases != 1:
working_threads[port] = None
def read_wlan_mac(port, command):
global working_threads
global wmacs
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
mac_read = False
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'MAC: ' in nextline:
wmacs[port] = nextline[5:-1].replace(":", "-").upper()
sys.stdout.write('MAC address %s read OK on port %s\n' % (nextline[5:-1], port))
mac_read = True
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or not mac_read:
working_threads[port] = None
def set_vdd_sdio_voltage(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'VDD_SDIO setting complete' in nextline:
sys.stdout.write('Board VDD_SDIO Voltage configured OK on port %s\n' % port)
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0:
working_threads[port] = None
def flash_firmware(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_hashes = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'at 0x00001000' in nextline:
sys.stdout.write('Bootloader programmed OK on port %s\n' % port)
elif 'at 0x00008000' in nextline:
sys.stdout.write('Partition table programmed OK on port %s\n' % port)
elif 'at 0x00010000' in nextline:
sys.stdout.write('Application programmed OK on port %s\n' % port)
elif 'Hash of data verified' in nextline:
num_hashes += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_hashes != 3:
working_threads[port] = None
def run_initial_test(port, board):
global working_threads
if board == 'LoPy':
import run_initial_lopy_test as run_test
elif board == 'LoPy4':
import run_initial_lopy4_test as run_test
elif board == 'SiPy':
import run_initial_sipy_test as run_test
else:
import run_initial_wipy_test as run_test
try:
if not run_test.test_board(port):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def flash_lpwan_mac(port, mac):
import flash_lpwan_mac
global working_threads
try:
if not flash_lpwan_mac.program_board(port, mac):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_final_test(port, board, mac):
if board == 'LoPy':
import run_final_lopy_test as run_test
elif board == 'LoPy4':
import run_final_lopy4_test as run_test
else:
import run_final_sipy_test as run_test
try:
if not run_test.test_board(port, mac, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_qa_test(port, board):
global working_threads
if board == 'LoPy':
import run_qa_lopy_test as run_test
elif board == 'LoPy4':
import run_qa_lopy4_test as run_test
elif board == 'SiPy':
import run_qa_sipy_test as run_test
else:
import run_qa_wipy_test as run_test
try:
if not run_test.test_board(port, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def main():
cmd_parser = argparse.ArgumentParser(description='Flash the ESP32 and optionally run a small test on it.')
cmd_parser.add_argument('--esptool', default=None, help='the path to the esptool')
cmd_parser.add_argument('--espefuse', default=None, help='the path to the espefuse')
cmd_parser.add_argument('--boot', default=None, help='the path to the bootloader binary')
cmd_parser.add_argument('--table', default=None, help='the path to the partitions table')
cmd_parser.add_argument('--app', default=None, help='the path to the application binary')
cmd_parser.add_argument('--macs', default="macs.db", help='the path to the MAC addresses database')
cmd_parser.add_argument('--ports', default=['/dev/ttyUSB0'], nargs='+', help="the serial ports of the ESP32's to program")
cmd_parser.add_argument('--erase', default=None, help='set to True to erase the boards first')
cmd_parser.add_argument('--qa', action='store_true', help='just do some quality asurance test')
cmd_parser.add_argument('--board', default='LoPy', help='identifies the board to be flashed and tested')
cmd_parser.add_argument('--revision', default='1', help='identifies the hardware revision')
cmd_args = cmd_parser.parse_args()
global working_threads
global wmacs
output = ""
ret = 0
global_ret = 0
if cmd_args.qa:
raw_input("Please reset all the boards, wait until the LED starts blinking and then press enter...")
time.sleep(2.5) # wait for the board to reset
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_qa_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] == None:
print("Failed QA test on board connected to %s" % port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("QA test succeeded on all boards:-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the QA test!")
print("=============================================================")
global_ret = 1
else:
print("Reading the WLAN MAC address...")
try:
for port in cmd_args.ports:
cmd = ['python', 'esptool.py', '--port', port, 'read_mac']
working_threads[port] = threading.Thread(target=read_wlan_mac, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error reading the WLAN MAC on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("WLAN MAC address reading succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: WLAN MAC address reading failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
if int(cmd_args.revision) > 1:
# program the efuse bits to set the VDD_SDIO voltage to 1.8V
try:
print('Configuring the VDD_SDIO voltage...')
for port in cmd_args.ports:
cmd = ['python', cmd_args.espefuse, '--port', port, '--do-not-confirm', 'set_flash_voltage', '1.8V']
working_threads[port] = threading.Thread(target=set_vdd_sdio_voltage, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error setting the VDD_SDIO voltage on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("VDD_SDIO voltage setting succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: VDD_SDIO voltage setting failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
if cmd_args.erase:
try:
print('Erasing flash memory... (will take a few seconds)')
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'erase_flash']
working_threads[port] = threading.Thread(target=erase_flash, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error erasing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch erasing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch erasing failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
try:
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
open_macs_db(cmd_args.macs)
macs_list = fetch_MACs(len(cmd_args.ports))
if len(macs_list) < len(cmd_args.ports):
print("No enough remaining MAC addresses to use")
sys.exit(1)
mac_per_port = {}
i = 0
for port in cmd_args.ports:
mac_per_port[port] = macs_list[i]
i += 1
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', cmd_args.boot,
'0x8000', cmd_args.table, '0x10000', cmd_args.app]
working_threads[port] = threading.Thread(target=flash_firmware, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programming board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
else:
print("Board on port %s programmed OK" % port)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch firmware programming failed on some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please place all boards into run mode, RESET them and then \n press enter to continue with the testing process...")
time.sleep(5.0) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_initial_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error testing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
elif cmd_args.board == 'WiPy':
print("Batch test OK on port %s, firmware version %s" % (port, fw_version.number))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), ' ', 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch testing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch testing failed in some boards!")
print("=============================================================")
global_ret = 1
    # only do the MAC programming and MAC verification for the LoPy, SiPy and LoPy4
    if cmd_args.board in ('LoPy', 'SiPy', 'LoPy4'):
print("Waiting before programming the LPWAN MAC address...")
time.sleep(3.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
set_mac_status(mac_per_port[port], "", DB_MAC_LOCK) # mark them as locked, so if the script fails and doesn't get to save, they wont be accidentally reused
working_threads[port] = threading.Thread(target=flash_lpwan_mac, args=(port, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
                if working_threads[port] is None:
                    print("Error programming MAC address on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch MAC programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch MAC programming failed in some boards!")
print("=============================================================")
global_ret = 1
print("Waiting for the board(s) to reboot...")
time.sleep(4.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_final_test, args=(port, cmd_args.board, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
                if working_threads[port] is None:
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
print("Error performing MAC address test on port %s" % port)
else:
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_OK)
print("Final test OK on port %s, firmware version %s, MAC address %s" % (port, fw_version.number, mac_per_port[port]))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), '%s' % (mac_per_port[port]), 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Final test succeeded on all boards :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the final test!")
print("=============================================================")
global_ret = 1
macs_db.close()
sys.exit(global_ret)
if __name__ == "__main__":
main()
| 42.116323
| 286
| 0.513587
| 2,506
| 22,448
| 4.442538
| 0.135674
| 0.082997
| 0.084074
| 0.039522
| 0.659481
| 0.625438
| 0.589149
| 0.552052
| 0.521513
| 0.51262
| 0
| 0.012657
| 0.30662
| 22,448
| 532
| 287
| 42.195489
| 0.702602
| 0.07239
| 0
| 0.608076
| 0
| 0.004751
| 0.252345
| 0.097041
| 0
| 0
| 0.002357
| 0
| 0
| 1
| 0.030879
| false
| 0
| 0.054632
| 0.002375
| 0.087886
| 0.180523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bccff8756b8fd9c49c849a5ee7e86c1a5271fe95
| 2,315
|
py
|
Python
|
hknweb/events/tests/models/utils.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/events/tests/models/utils.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/events/tests/models/utils.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from hknweb.events.models import Event, EventType, Rsvp
class ModelFactory:
@staticmethod
def create_user(**kwargs):
default_kwargs = {
"username": "default username",
}
kwargs = {**default_kwargs, **kwargs}
return User.objects.create(**kwargs)
@staticmethod
def create_event_type(**kwargs):
default_kwargs = {
"type": "default event type",
}
kwargs = {**default_kwargs, **kwargs}
return EventType.objects.create(**kwargs)
@staticmethod
def create_event(name, event_type, created_by, **kwargs):
required_kwargs = {
"name": name,
"event_type": event_type,
"created_by": created_by,
}
default_kwargs = {
"start_time": timezone.now(),
"end_time": timezone.now() + datetime.timedelta(hours=2),
"location": "default location",
"description": "default description",
}
kwargs = {**required_kwargs, **default_kwargs, **kwargs}
return Event.objects.create(**kwargs)
@staticmethod
def create_rsvp(user, event, **kwargs):
required_kwargs = {
"user": user,
"event": event,
}
kwargs = {**required_kwargs, **kwargs}
return Rsvp.objects.create(**kwargs)
@staticmethod
def create_event_with_rsvps():
event_create_user = ModelFactory.create_user(username="event create user")
num_rsvps = 3
rsvp_users = [
ModelFactory.create_user(username="rsvp_user_{}".format(str(i)))
for i in range(1, 1 + num_rsvps)
]
event_type = ModelFactory.create_event_type()
event_name = "custom event name"
event = ModelFactory.create_event(
name=event_name,
event_type=event_type,
created_by=event_create_user,
rsvp_limit=num_rsvps - 1,
)
rsvps = [ModelFactory.create_rsvp(rsvp_user, event) for rsvp_user in rsvp_users]
return (
event_create_user,
rsvp_users,
event_type,
event_name,
event,
rsvps,
)
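# Example usage (illustrative, inside a Django TestCase):
#   user, rsvp_users, event_type, name, event, rsvps = ModelFactory.create_event_with_rsvps()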
| 28.9375
| 88
| 0.581425
| 234
| 2,315
| 5.508547
| 0.226496
| 0.069822
| 0.081458
| 0.096199
| 0.280838
| 0.183863
| 0.152832
| 0
| 0
| 0
| 0
| 0.003149
| 0.314039
| 2,315
| 79
| 89
| 29.303797
| 0.808564
| 0
| 0
| 0.179104
| 0
| 0
| 0.085097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.059701
| 0
| 0.223881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcd22bd32e41749d160e83a36693fbb03e02a7c0
| 2,232
|
py
|
Python
|
algorithms/329. Longest Increasing Path in a Matrix.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | 1
|
2020-12-02T13:54:30.000Z
|
2020-12-02T13:54:30.000Z
|
algorithms/329. Longest Increasing Path in a Matrix.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
algorithms/329. Longest Increasing Path in a Matrix.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
"""
1. Clarification
2. Possible solutions
- dfs + memoization
- Topological sort
3. Coding
4. Tests
"""
# T=O(m*n), S=O(m*n)
import collections
from functools import lru_cache
from typing import List
class Solution:
DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix:
return 0
@lru_cache(None)
def dfs(row: int, col: int) -> int:
best = 1
for dx, dy in Solution.DIRS:
newRow, newCol = row + dx, col + dy
if 0 <= newRow < rows and 0 <= newCol < cols and matrix[newRow][newCol] > matrix[row][col]:
best = max(best, dfs(newRow, newCol) + 1)
return best
ans = 0
rows, cols = len(matrix), len(matrix[0])
for i in range(rows):
for j in range(cols):
ans = max(ans, dfs(i, j))
return ans
# T=O(m*n), S=O(m*n)
class Solution:
DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix:
return 0
rows, cols = len(matrix), len(matrix[0])
outdegrees = [[0] * cols for _ in range(rows)]
queue = collections.deque()
for i in range(rows):
for j in range(cols):
for dx, dy in Solution.DIRS:
newRow, newCol = i + dx, j + dy
if 0 <= newRow < rows and 0 <= newCol < cols and matrix[newRow][newCol] > matrix[i][j]:
outdegrees[i][j] += 1
if outdegrees[i][j] == 0:
queue.append((i, j))
ans = 0
while queue:
ans += 1
size = len(queue)
for _ in range(size):
row, col = queue.popleft()
for dx, dy in Solution.DIRS:
newRow, newCol = row + dx, col + dy
if 0 <= newRow < rows and 0 <= newCol < cols and matrix[newRow][newCol] < matrix[row][col]:
outdegrees[newRow][newCol] -= 1
if outdegrees[newRow][newCol] == 0:
queue.append((newRow, newCol))
return ans
| 30.162162
| 111
| 0.471774
| 283
| 2,232
| 3.706714
| 0.222615
| 0.114395
| 0.011439
| 0.025739
| 0.5796
| 0.5796
| 0.5796
| 0.5796
| 0.480458
| 0.480458
| 0
| 0.029696
| 0.396505
| 2,232
| 73
| 112
| 30.575342
| 0.749072
| 0.064068
| 0
| 0.510204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.020408
| 0
| 0.265306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcd344e1483580a8d86580469eef57c0ac31bfc7
| 1,511
|
py
|
Python
|
cocos2d/tools/coding-style/tailing-spaces.py
|
NIKEA-SOFT/TestGame
|
04f13e5f1324bca9f1e47f02037ea1eddd3bcc8f
|
[
"MIT"
] | 898
|
2020-01-09T12:03:08.000Z
|
2022-03-31T07:59:46.000Z
|
cocos2d/tools/coding-style/tailing-spaces.py
|
NIKEA-SOFT/TestGame
|
04f13e5f1324bca9f1e47f02037ea1eddd3bcc8f
|
[
"MIT"
] | 172
|
2020-02-21T08:56:42.000Z
|
2021-05-12T03:18:40.000Z
|
cocos2d/tools/coding-style/tailing-spaces.py
|
NIKEA-SOFT/TestGame
|
04f13e5f1324bca9f1e47f02037ea1eddd3bcc8f
|
[
"MIT"
] | 186
|
2020-01-13T09:34:30.000Z
|
2022-03-22T04:48:48.000Z
|
#!/usr/bin/env python
#coding=utf-8
'''
Remove trailing whitespace and ensure one and only one empty ending line.
'''
import os, re
def scan(*dirs, **kwargs):
files = []
    extensions = kwargs['extensions'] if 'extensions' in kwargs else None
    excludes = kwargs['excludes'] if 'excludes' in kwargs else []
for top in dirs:
for root, dirnames, filenames in os.walk(top):
            dirnames[:] = [i for i in dirnames if i not in excludes]  # prune excluded dirs in place
for f in filenames:
if f in excludes:
continue
ext = os.path.splitext(f)[1].lower()
if extensions is None or ext in extensions:
files.append(os.path.join(root, f))
return files
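# Illustrative call: gather C/C++ sources under two trees, skipping a VCS dir:
#   sources = scan('cocos', 'extensions', extensions=['.c', '.cpp'], excludes=['.git'])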
def fixone(src):
    with open(src, 'r') as f:
        lines = f.readlines()
trimed = []
for line in lines:
        trimed.append(re.sub(r'\s+$', '', line))
while len(trimed) > 1 and not trimed[-1]:
trimed.pop()
trimed.append('')
with open(src, 'w') as f:
for line in trimed:
f.write('%s\n' % line)
def lint(root):
print('Checking tailing whitespaces in: %s' % root)
dirs = [
os.path.join(root, 'cocos'),
os.path.join(root, 'extensions'),
os.path.join(root, 'templates'),
os.path.join(root, 'tests'),
os.path.join(root, 'tools', 'simulator')
]
files = scan(*dirs, extensions=['.c', '.cpp', '.h', '.hpp', '.m', '.mm', '.java'])
for f in files:
print(f)
fixone(f)
def main():
default_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
lint(default_root)
main()
| 24.370968
| 85
| 0.608868
| 221
| 1,511
| 4.126697
| 0.40724
| 0.065789
| 0.076754
| 0.092105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003376
| 0.215751
| 1,511
| 61
| 86
| 24.770492
| 0.766245
| 0.070814
| 0
| 0
| 0
| 0
| 0.107527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.023256
| 0
| 0.139535
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcd3b0b0dedcabbec5fd0840549ab45783c9eb2d
| 4,096
|
py
|
Python
|
three.py/TestPostprocessing-8Bit.py
|
Michael-Pascale/three.py
|
9912f5f850245fb9456a25b6737e12290ae54a2d
|
[
"MIT"
] | null | null | null |
three.py/TestPostprocessing-8Bit.py
|
Michael-Pascale/three.py
|
9912f5f850245fb9456a25b6737e12290ae54a2d
|
[
"MIT"
] | null | null | null |
three.py/TestPostprocessing-8Bit.py
|
Michael-Pascale/three.py
|
9912f5f850245fb9456a25b6737e12290ae54a2d
|
[
"MIT"
] | null | null | null |
from core import *
from cameras import *
from geometry import *
from material import *
from lights import *
class TestPostprocessing2(Base):
def initialize(self):
self.setWindowTitle('Pixellation and Reduced Color Palette')
self.setWindowSize(1024,768)
self.renderer = Renderer()
self.renderer.setViewportSize(1024,768)
self.renderer.setClearColor(0.5,0.5,0.5)
self.scene = Scene()
self.camera = PerspectiveCamera()
self.camera.setAspectRatio(1024/768)
self.camera.transform.setPosition(0, 0, 6)
self.cameraControls = FirstPersonController(self.input, self.camera)
self.renderTarget = RenderTarget.RenderTarget(1024,768)
crateTexture = OpenGLUtils.initializeTexture("images/crate.jpg")
ballTexture = OpenGLUtils.initializeTexture("images/basketball.png")
self.cube = Mesh( BoxGeometry(), SurfaceLambertMaterial(objTexture=crateTexture) )
self.cube.transform.translate(1.5, 0, 0, Matrix.LOCAL)
self.scene.add(self.cube)
self.sphere = Mesh( SphereGeometry(), SurfaceLambertMaterial(objTexture=ballTexture) )
self.sphere.transform.translate(-1.5, 0, 0, Matrix.LOCAL)
self.scene.add(self.sphere)
ambientLight = AmbientLight(color=[0.1,0.1,0.2])
self.scene.add( ambientLight )
directionalLight = DirectionalLight(color=[1,1,1], position=[4,4,-2], direction=[-1,-1,-1])
self.scene.add( directionalLight )
# add postprocessing content
self.postScene = Scene()
postGeo = Geometry()
vertexPositionData = [[-1,-1],[1,-1],[1,1], [-1,-1],[1,1],[-1,1]]
postGeo.setAttribute("vec2", "vertexPosition", vertexPositionData)
postGeo.vertexCount = 6
vsCode = """
in vec2 vertexPosition;
void main()
{
gl_Position = vec4(vertexPosition, 0, 1);
}
"""
fsCode = """
uniform sampler2D image;
uniform vec2 textureSize;
// round x to the nearest 1/denominator
float roundFrac(float x, float denominator)
{
return round(x*denominator) / denominator;
}
void main()
{
// pixellate original image
int k = 8;
vec2 rounded = k * floor(gl_FragCoord.xy / k);
vec2 UV = rounded / textureSize;
vec4 color = vec4(0,0,0,0);
for (int x = 0; x < k; x++)
{
for (int y = 0; y < k; y++)
{
color += texture(image, UV + vec2(x,y)/textureSize);
}
}
color /= (k*k);
// reduce color to a smaller palette
color.r = roundFrac(color.r, 8);
color.g = roundFrac(color.g, 8);
color.b = roundFrac(color.b, 8);
        // output the pixellated, palette-reduced color
gl_FragColor = color;
}
"""
uniforms = [
["vec2", "textureSize", [1024,768]],
["sampler2D", "image", self.renderTarget.textureID] ]
postMat = Material(vsCode, fsCode, uniforms)
postMesh = Mesh(postGeo, postMat)
self.postScene.add(postMesh)
def update(self):
self.cameraControls.update()
# rotate main scene objects
self.cube.transform.rotateX(0.005, Matrix.LOCAL)
self.cube.transform.rotateY(0.008, Matrix.LOCAL)
self.sphere.transform.rotateX(0.005, Matrix.LOCAL)
self.sphere.transform.rotateY(0.008, Matrix.LOCAL)
# first, render scene into target (texture)
self.renderer.render(self.scene, self.camera, self.renderTarget)
# second, render post-processed scene to window.
# (note: camera irrelevant since projection/view matrices are not used in shader.)
self.renderer.render(self.postScene, self.camera)
# instantiate and run the program
TestPostprocessing2().run()
| 32
| 99
| 0.577148
| 430
| 4,096
| 5.490698
| 0.351163
| 0.012706
| 0.015248
| 0.015248
| 0.109276
| 0.102499
| 0.076239
| 0.04659
| 0.04659
| 0.04659
| 0
| 0.043325
| 0.306885
| 4,096
| 127
| 100
| 32.251969
| 0.788306
| 0.062012
| 0
| 0.045455
| 0
| 0
| 0.324465
| 0.016954
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.056818
| 0
| 0.102273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcd61a8f67cde91f10cbb1a9264485fd9ef2e8b8
| 3,205
|
py
|
Python
|
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py
|
catb0y/twitter_feeling
|
9092a26f2554bbf6b14b33d797abaffa48cda99c
|
[
"MIT"
] | 69
|
2020-03-31T06:40:17.000Z
|
2022-02-25T11:48:18.000Z
|
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py
|
catb0y/twitter_feeling
|
9092a26f2554bbf6b14b33d797abaffa48cda99c
|
[
"MIT"
] | 11
|
2019-12-26T17:21:03.000Z
|
2022-03-21T22:17:07.000Z
|
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py
|
catb0y/twitter_feeling
|
9092a26f2554bbf6b14b33d797abaffa48cda99c
|
[
"MIT"
] | 28
|
2020-04-15T15:24:17.000Z
|
2021-12-26T04:05:02.000Z
|
# -*- coding: utf-8 -*-
"""
Unit tests for Senna
"""
from __future__ import unicode_literals
from os import environ, path, sep
import logging
import unittest
from nltk.classify import Senna
from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger
# Set Senna executable path for tests if it is not specified as an environment variable
if 'SENNA' in environ:
SENNA_EXECUTABLE_PATH = path.normpath(environ['SENNA']) + sep
else:
SENNA_EXECUTABLE_PATH = '/usr/share/senna-v3.0'
senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH)
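# Illustrative invocation with a custom install location (path is an example):
#   SENNA=/opt/senna python -m unittest nltk.test.unit.test_senna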
@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
class TestSennaPipeline(unittest.TestCase):
"""Unittest for nltk.classify.senna"""
def test_senna_pipeline(self):
"""Senna pipeline interface"""
pipeline = Senna(SENNA_EXECUTABLE_PATH, ['pos', 'chk', 'ner'])
sent = 'Dusseldorf is an international business center'.split()
result = [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)]
expected = [('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP',
'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'), ('international', 'I-NP',
'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP',
'O', 'NN')]
self.assertEqual(result, expected)
@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
class TestSennaTagger(unittest.TestCase):
"""Unittest for nltk.tag.senna"""
def test_senna_tagger(self):
tagger = SennaTagger(SENNA_EXECUTABLE_PATH)
result = tagger.tag('What is the airspeed of an unladen swallow ?'.split())
expected = [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed',
'NN'),('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow',
'NN'), ('?', '.')]
self.assertEqual(result, expected)
def test_senna_chunk_tagger(self):
chktagger = SennaChunkTagger(SENNA_EXECUTABLE_PATH)
result_1 = chktagger.tag('What is the airspeed of an unladen swallow ?'.split())
expected_1 = [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed',
'I-NP'), ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow',
'I-NP'), ('?', 'O')]
result_2 = list(chktagger.bio_to_chunks(result_1, chunk_type='NP'))
expected_2 = [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow',
'5-6-7')]
self.assertEqual(result_1, expected_1)
self.assertEqual(result_2, expected_2)
def test_senna_ner_tagger(self):
nertagger = SennaNERTagger(SENNA_EXECUTABLE_PATH)
result_1 = nertagger.tag('Shakespeare theatre was in London .'.split())
expected_1 = [('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'),
('in', 'O'), ('London', 'B-LOC'), ('.', 'O')]
result_2 = nertagger.tag('UN headquarters are in NY , USA .'.split())
expected_2 = [('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'),
('in', 'O'), ('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')]
self.assertEqual(result_1, expected_1)
self.assertEqual(result_2, expected_2)
| 42.733333
| 108
| 0.597504
| 397
| 3,205
| 4.685139
| 0.287154
| 0.080645
| 0.08172
| 0.040323
| 0.282796
| 0.188172
| 0.188172
| 0.188172
| 0.188172
| 0.121505
| 0
| 0.01018
| 0.20312
| 3,205
| 74
| 109
| 43.310811
| 0.718089
| 0.067083
| 0
| 0.153846
| 0
| 0
| 0.219825
| 0.00708
| 0
| 0
| 0
| 0
| 0.115385
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcd716fdc72869755eef1e517937f6675edfef9d
| 8,191
|
py
|
Python
|
eoxserver/services/opensearch/v11/description.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 25
|
2015-08-10T19:34:34.000Z
|
2021-02-05T08:28:01.000Z
|
eoxserver/services/opensearch/v11/description.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 153
|
2015-01-20T08:35:49.000Z
|
2022-03-16T11:00:56.000Z
|
eoxserver/services/opensearch/v11/description.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 10
|
2015-01-23T15:48:30.000Z
|
2021-01-21T15:41:18.000Z
|
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2015 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from itertools import chain
from lxml.builder import ElementMaker
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.shortcuts import get_object_or_404
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.util.xmltools import (
XMLEncoder, NameSpace, NameSpaceMap
)
from eoxserver.resources.coverages import models
from eoxserver.services.opensearch.formats import get_formats
from eoxserver.services.opensearch.extensions import get_extensions
from eoxserver.services.opensearch.config import OpenSearchConfigReader
class OpenSearch11DescriptionEncoder(XMLEncoder):
content_type = "application/opensearchdescription+xml"
def __init__(self, search_extensions):
ns_os = NameSpace("http://a9.com/-/spec/opensearch/1.1/", None)
self.ns_param = ns_param = NameSpace(
"http://a9.com/-/spec/opensearch/extensions/parameters/1.0/",
"parameters"
)
ns_atom = NameSpace("http://www.w3.org/2005/Atom", "atom")
nsmap = NameSpaceMap(ns_os, ns_param, ns_atom)
for search_extension in search_extensions:
nsmap.add(search_extension.namespace)
self.OS = ElementMaker(namespace=ns_os.uri, nsmap=nsmap)
self.PARAM = ElementMaker(namespace=ns_param.uri, nsmap=nsmap)
self.ATOM = ElementMaker(namespace=ns_atom.uri, nsmap=nsmap)
self.search_extensions = search_extensions
def encode_description(self, request, collection, result_formats):
""" Encode an OpenSearch 1.1 description document.
"""
OS = self.OS
description = OS("OpenSearchDescription",
OS("ShortName",
collection.identifier if collection is not None else ""
),
OS("Description")
)
for method in ("GET", "POST"):
description.extend([
self.encode_url(
request, collection, result_format, method
)
for result_format in result_formats
])
description.extend([
OS("Contact"),
OS("Tags", "CEOS-OS-BP-V1.1/L1"),
OS("LongName"),
OS("Developer"),
OS("Attribution"),
OS("SyndicationRight", "open"),
OS("AdultContent"),
OS("Language"),
OS("InputEncoding"),
OS("OutputEncoding")
])
return description
def encode_url(self, request, collection, result_format, method):
""" Encode a single opensearch URL, either for a specific collection, or
the whole service.
"""
if collection is not None:
search_url = reverse("opensearch:collection:search",
kwargs={
"collection_id": collection.identifier,
"format_name": result_format.name
}
)
else:
search_url = reverse("opensearch:search",
kwargs={
"format_name": result_format.name
}
)
conf = OpenSearchConfigReader(get_eoxserver_config())
search_url = request.build_absolute_uri(search_url)
default_parameters = (
dict(name="q", type="searchTerms", profiles=[
]),
dict(name="count", type="count", min=0, max=conf.max_count),
dict(name="startIndex", type="startIndex", min=0),
)
parameters = list(chain(default_parameters, *[
[
dict(parameter, **{"namespace": search_extension.namespace})
for parameter in search_extension.get_schema(
collection,
models.Collection if collection is None else models.Product
)
] for search_extension in self.search_extensions
]))
query_template = "&".join(
"%s={%s%s%s%s}" % (
parameter["name"],
parameter["namespace"].prefix
if "namespace" in parameter else "",
":" if "namespace" in parameter else "",
parameter["type"],
"?" if parameter.get("optional", True) else ""
)
for parameter in parameters
)
url = self.OS("Url", *[
self.encode_parameter(parameter, parameter.get("namespace"))
for parameter in parameters
],
type=result_format.mimetype,
template="%s?%s" % (search_url, query_template)
if method == "GET" else search_url,
rel="results" if collection is not None else "collection", ** {
self.ns_param("method"): method,
self.ns_param("enctype"): "application/x-www-form-urlencoded",
"indexOffset": "0"
}
)
return url
def encode_parameter(self, parameter, namespace):
options = parameter.pop("options", [])
profiles = parameter.pop("profiles", [])
attributes = {"name": parameter["name"]}
if namespace:
attributes["value"] = "{%s:%s}" % (
namespace.prefix, parameter.pop("type")
)
else:
attributes["value"] = "{%s}" % parameter.pop("type")
if 'min' in parameter:
attributes['minInclusive'] = str(parameter['min'])
if 'max' in parameter:
attributes['maxInclusive'] = str(parameter['max'])
pattern = parameter.get("pattern")
if pattern:
attributes["pattern"] = pattern
return self.PARAM("Parameter", *[
self.PARAM("Option", value=option, label=option)
for option in options
] + [
self.ATOM("link",
rel="profile", href=profile["href"], title=profile["title"]
)
for profile in profiles
], minimum="0" if parameter.get("optional", True) else "1", maximum="1",
**attributes
)
class OpenSearch11DescriptionHandler(object):
def handle(self, request, collection_id=None):
collection = None
if collection_id:
collection = get_object_or_404(models.Collection,
identifier=collection_id
)
encoder = OpenSearch11DescriptionEncoder([
extension() for extension in get_extensions()
])
return (
encoder.serialize(
encoder.encode_description(
request, collection, [format_() for format_ in get_formats()]
)
),
encoder.content_type
)
| 37.746544
| 81
| 0.580393
| 827
| 8,191
| 5.654172
| 0.307134
| 0.016467
| 0.011976
| 0.019889
| 0.078914
| 0.037211
| 0
| 0
| 0
| 0
| 0
| 0.006554
| 0.29215
| 8,191
| 216
| 82
| 37.921296
| 0.799931
| 0.18911
| 0
| 0.093168
| 0
| 0
| 0.118031
| 0.018077
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031056
| false
| 0
| 0.074534
| 0
| 0.149068
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcda0fb17ff31d81f09ba63207547e8568fa2ae6
| 2,085
|
py
|
Python
|
lab1/text_recognizer/models/mlp.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
lab1/text_recognizer/models/mlp.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
lab1/text_recognizer/models/mlp.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
FC1_DIM = 1024
FC2_DIM = 128
class MLP(nn.Module):
"""Simple MLP suitable for recognizing single characters."""
def __init__(
self,
data_config: Dict[str, Any],
args: argparse.Namespace = None,
) -> None:
super().__init__()
self.args = vars(args) if args is not None else {}
input_dim = np.prod(data_config["input_dims"])
num_classes = len(data_config["mapping"])
self.dropout = nn.Dropout(0.5)
layers = self.args.get("layers", FC1_DIM)
self.layers = layers
if layers:
fcn = (int(FC1_DIM - x * ((FC1_DIM - FC2_DIM)//(layers-1))) for x in range(layers))
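            # e.g. layers=3 gives widths 1024, 576, 128: a linear ramp from
            # FC1_DIM down to FC2_DIM in `layers` steps ((1024 - 128) // 2 == 448)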
fcl = input_dim
fcv = []
for fci in fcn:
fcv.append(nn.Linear(fcl, fci))
fcl = fci
fcv.append(nn.Linear(fcl, num_classes))
self.fcv = nn.Sequential(*fcv)
else:
fc1_dim = self.args.get("fc1", FC1_DIM)
fc2_dim = self.args.get("fc2", FC2_DIM)
self.fc1 = nn.Linear(input_dim, fc1_dim)
self.fc2 = nn.Linear(fc1_dim, fc2_dim)
self.fc3 = nn.Linear(fc2_dim, num_classes)
def forward(self, x):
x = torch.flatten(x, 1)
if self.layers:
for fci in self.fcv[:-1]:
x = fci(x)
x = F.relu(x)
x = self.dropout(x)
x = self.fcv[-1](x)
else:
x = self.fc1(x)
x = F.relu(x)
x = self.dropout(x)
x = self.fc2(x)
x = F.relu(x)
x = self.dropout(x)
x = self.fc3(x)
return x
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--layers", type=int, default=None, choices=range(2, 20))
parser.add_argument("--fc1", type=int, default=1024)
parser.add_argument("--fc2", type=int, default=128)
return parser
| 29.366197
| 95
| 0.533813
| 284
| 2,085
| 3.788732
| 0.285211
| 0.018587
| 0.033457
| 0.033457
| 0.139405
| 0.072491
| 0.072491
| 0.072491
| 0.072491
| 0.072491
| 0
| 0.034207
| 0.341007
| 2,085
| 70
| 96
| 29.785714
| 0.748908
| 0.025899
| 0
| 0.135593
| 0
| 0
| 0.02321
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.101695
| 0
| 0.20339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcda1861cc6349c05142c05367f155b32d44ad1c
| 979
|
py
|
Python
|
frontend/widgets/button.py
|
AzoeDesarrollos/PyMavisDatabase
|
bfcd0557f63a4d8a73f0f8e891c47b47a1de1b45
|
[
"MIT"
] | null | null | null |
frontend/widgets/button.py
|
AzoeDesarrollos/PyMavisDatabase
|
bfcd0557f63a4d8a73f0f8e891c47b47a1de1b45
|
[
"MIT"
] | 2
|
2019-10-05T14:20:11.000Z
|
2019-10-05T14:22:31.000Z
|
frontend/widgets/button.py
|
AzoeDesarrollos/PyMavisDatabase
|
bfcd0557f63a4d8a73f0f8e891c47b47a1de1b45
|
[
"MIT"
] | null | null | null |
from pygame import Surface, font
from .basewidget import BaseWidget
from frontend import Renderer, WidgetHandler
class Button(BaseWidget):
action = None
def __init__(self, x, y, texto, action=None):
self.f = font.SysFont('Verdana', 16)
imagen = self.crear(texto)
rect = imagen.get_rect(topleft=(x, y))
super().__init__(imagen, rect)
Renderer.add_widget(self, 1)
WidgetHandler.add_widget(self, 1)
self.action = action
def crear(self, texto):
w, h = self.f.size(texto)
image = Surface((w + 4, h + 2))
image.fill((125, 125, 125), (1, 1, w+2, h))
render = self.f.render(texto, 1, (255, 255, 255), (125, 125, 125))
image.blit(render, (2, 1))
return image
def on_mousebuttondown(self, button):
if button == 1 and self.action is not None:
self.action()
def on_mouseover(self):
pass
def update(self):
self.dirty = 1
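# Example usage (illustrative; `on_save` is a hypothetical callable):
#   button = Button(10, 10, 'Save', action=on_save)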
| 27.971429
| 74
| 0.592441
| 133
| 979
| 4.263158
| 0.406015
| 0.042328
| 0.045855
| 0.049383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058156
| 0.279877
| 979
| 34
| 75
| 28.794118
| 0.746099
| 0
| 0
| 0
| 0
| 0
| 0.00715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185185
| false
| 0.037037
| 0.111111
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcda32ab85ecef62e60d41fc5f944271b774ca47
| 709
|
py
|
Python
|
tensorflow_rnn/mnist_lstm.py
|
naoki009/samples
|
dac3bbddbd06374c39768cbe17fefd0110fe316f
|
[
"BSD-2-Clause"
] | null | null | null |
tensorflow_rnn/mnist_lstm.py
|
naoki009/samples
|
dac3bbddbd06374c39768cbe17fefd0110fe316f
|
[
"BSD-2-Clause"
] | null | null | null |
tensorflow_rnn/mnist_lstm.py
|
naoki009/samples
|
dac3bbddbd06374c39768cbe17fefd0110fe316f
|
[
"BSD-2-Clause"
] | 1
|
2020-08-14T11:44:42.000Z
|
2020-08-14T11:44:42.000Z
|
import numpy as np
import tensorflow as tf
"""
Classify MNIST images line by line (row by row) with an LSTM.
"""
(x_train, y_train), \
(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(128, input_shape=(None, 28)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation("softmax"))
model.summary()
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer="sgd",
metrics=["accuracy"])
model.fit(x_train, y_train, validation_data=(x_test, y_test),
batch_size=100, epochs=100)
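# Note: each 28x28 image is consumed as 28 time steps of 28 features
# (one image row per step), which is what input_shape=(None, 28) expresses.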
| 27.269231
| 67
| 0.70945
| 108
| 709
| 4.509259
| 0.481481
| 0.100616
| 0.082136
| 0.123203
| 0.172485
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03437
| 0.138223
| 709
| 25
| 68
| 28.36
| 0.762684
| 0.066291
| 0
| 0
| 0
| 0
| 0.029752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcdd9f6e351b12352ead172914df612d99371de2
| 984
|
py
|
Python
|
scrap/CloudCoverUndersampling.py
|
cseale/kaggle-amazon-rainforests
|
cf42941bb3c70ba19257764b66fe33550be88e0b
|
[
"Apache-2.0"
] | null | null | null |
scrap/CloudCoverUndersampling.py
|
cseale/kaggle-amazon-rainforests
|
cf42941bb3c70ba19257764b66fe33550be88e0b
|
[
"Apache-2.0"
] | null | null | null |
scrap/CloudCoverUndersampling.py
|
cseale/kaggle-amazon-rainforests
|
cf42941bb3c70ba19257764b66fe33550be88e0b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os
from random import shuffle
from tqdm import tqdm
DATA_DIR = '../input/amazon/'
TRAIN_TIF_DIR = DATA_DIR + 'train-tif/'
TRAIN_CSV = DATA_DIR + 'train.csv'
TEST_TIF_DIR = DATA_DIR + 'test-tif/'
IMG_SIZE = 100
LR = 1e-3
MODEL_NAME = 'amazon=-{}-{}.model'.format(LR, '2conv-basic')
CLOUD_COVER_LABELS = [
'clear',
'cloudy',
'haze',
'partly_cloudy']
# read our data and take a look at what we are dealing with
train_csv = pd.read_csv(TRAIN_CSV)
train_csv.head()
tags = pd.DataFrame()
for label in CLOUD_COVER_LABELS:
tags[label] = train_csv.tags.apply(lambda x: np.where(label in x, 1, 0))
train_csv = pd.concat([train_csv, tags], axis=1)
# In[17]:
pd.concat([train_csv[train_csv.clear == 1].sample(n=7251),
train_csv[train_csv.cloudy == 1].sample(n=7251),
train_csv[train_csv.haze == 1],
train_csv[train_csv.partly_cloudy == 1].sample(n=7251)], axis=0, ignore_index=True)
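# (Converted notebook cell: the balanced frame above is displayed, not stored;
#  assign it, e.g. `balanced = pd.concat([...], axis=0)`, to reuse it downstream.)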
| 20.93617
| 83
| 0.690041
| 168
| 984
| 3.845238
| 0.434524
| 0.198142
| 0.102167
| 0.123839
| 0.123839
| 0.086687
| 0.086687
| 0.086687
| 0
| 0
| 0
| 0.036188
| 0.15752
| 984
| 46
| 84
| 21.391304
| 0.743064
| 0.087398
| 0
| 0
| 0
| 0
| 0.114478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.185185
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcde4233b8d9a36e066c7f656e904c7a4e46422b
| 3,247
|
py
|
Python
|
chintai-scrape/A001_parse_htmls.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | 16
|
2018-02-06T14:43:41.000Z
|
2021-01-23T05:07:33.000Z
|
chintai-scrape/A001_parse_htmls.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | null | null | null |
chintai-scrape/A001_parse_htmls.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | 4
|
2018-01-16T13:50:43.000Z
|
2019-12-16T19:45:54.000Z
|
import glob
import bs4
import gzip
import pickle
import re
import os
from concurrent.futures import ProcessPoolExecutor as PPE
import json
from pathlib import Path
from hashlib import sha256
import shutil
Path('json').mkdir(exist_ok=True)
def sanitize(text):
text = re.sub(r'(\t|\n|\r)', '', text)
text = re.sub(r'\xa0', '', text)
text = re.sub(r'\\r', '', text)
text = re.sub('地図で物件の周辺環境をチェック!', '', text)
return text
def is_train(x):
    return '線' not in x
def pmap(arg):
key, fns = arg
SIZE = len(fns)
for index, fn in enumerate(fns):
try:
            print('now', key, index, 'size', SIZE, fn)
html = gzip.decompress(open(fn, 'rb').read())
soup = bs4.BeautifulSoup(html, 'lxml')
if soup.find('link', {'rel':'canonical'}) is None:
Path(fn).unlink()
continue
canonical = soup.find('link', {'rel':'canonical'})['href']
if '/detail/' not in canonical:
Path(fn).unlink()
continue
basic_table = soup.find('div', {'class':'detail_basicInfo'})
if basic_table is None:
Path(fn).unlink()
continue
basic_table = basic_table.find('table')
            # handle column misalignment
tds = list(basic_table.find_all('td'))
tds.pop(0)
#print(tds.pop(0).text)
            tds = [td for td in tds if is_train(td.text)]  # filter out train-line rows by their text
print(len(basic_table.find_all('th')), len(tds))
if len(basic_table.find_all('th')) == 13 and len(tds) == 14:
tds.pop(4)
...
basic_obj = {sanitize(th.text):sanitize(td.text) for th, td in zip(basic_table.find_all('th'),tds)}
detail_obj = {}
for table in soup.find('div', {'class':'detail_specTable'}).find_all('table'):
#print(table)
for th, td in zip(table.find_all('th'), table.find_all('td')):
detail_obj[sanitize(th.text)] = sanitize(td.text)
obj = {'basic':basic_obj, 'detail':detail_obj, 'canonical':canonical, 'title':soup.title.text}
last_fn = fn.split('/')[-1]
            shutil.move(fn, f'parsed_htmls/{last_fn}')
with open(f'json/{last_fn}', 'w') as fp:
fp.write(json.dumps(obj, indent=2, ensure_ascii=False))
except Exception as ex:
#Path(fn).unlink()
print(ex)
#detail_table = soup.find('table', {'class':'bukken_detail_table'})
#detail_obj = {re.sub(r'\t', '', th.text):re.sub(r'(\t|\n)', '', td.text) for th, td in zip(detail_table.find_all('th'), detail_table.find_all('td'))}
#print(detail_obj)
#urls = [sha256(bytes(v, 'utf8')).hexdigest() for v in json.load(fp=open('./hash_url.json')).values()]
#fns = [f'./htmls/{url}' for url in urls]
import random
files = glob.glob('./htmls/*')
random.shuffle(files)
args = {}
for index, fn in enumerate(files):
key = index%8
if args.get(key) is None:
args[key] = []
args[key].append(fn)
args = [(key,fns) for key,fns in args.items()]
#[pmap(arg) for arg in args]
with PPE(max_workers=8) as exe:
exe.map(pmap, args)
| 36.077778
| 158
| 0.55559
| 450
| 3,247
| 3.917778
| 0.297778
| 0.045944
| 0.054453
| 0.039705
| 0.263188
| 0.142938
| 0.052184
| 0
| 0
| 0
| 0
| 0.008959
| 0.278103
| 3,247
| 89
| 159
| 36.483146
| 0.743174
| 0.140437
| 0
| 0.081081
| 0
| 0
| 0.085252
| 0.007914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.162162
| 0
| 0.243243
| 0.040541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcde81a6deec0252f40277dde895c56c9a4836eb
| 5,047
|
py
|
Python
|
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
|
ricardolsmendes/datacatalog-connectors-hive
|
9e71588133c0b0227e789c8d6bb26cfa031d2cfb
|
[
"Apache-2.0"
] | 19
|
2020-04-27T21:55:47.000Z
|
2022-03-22T19:45:14.000Z
|
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
|
ricardolsmendes/datacatalog-connectors-hive
|
9e71588133c0b0227e789c8d6bb26cfa031d2cfb
|
[
"Apache-2.0"
] | 12
|
2020-05-28T14:48:29.000Z
|
2022-01-15T17:52:09.000Z
|
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
|
mesmacosta/datacatalog-connectors-hive
|
ab7e49fbef8599dd9053c2260b261ce01f510a47
|
[
"Apache-2.0"
] | 15
|
2020-05-03T17:25:51.000Z
|
2022-01-11T22:10:35.000Z
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.datacatalog_connectors.apache_atlas import scrape
class MetadataScraper:
def __init__(self, connection_args):
self._apache_atlas_facade = scrape.apache_atlas_facade.\
ApacheAtlasFacade(connection_args)
self.__metadata_enricher = scrape.metadata_enricher.\
MetadataEnricher(self._apache_atlas_facade)
def get_metadata(self, **kwargs):
self._log_scrape_start('Scraping all Metadata...')
classifications_dict = {}
entity_types_dict = {}
enum_types_dict = {}
self._log_scrape_start('Scraping admin metrics...')
admin_metrics = self._apache_atlas_facade.get_admin_metrics()
logging.info(admin_metrics)
self._log_single_object_scrape_result(admin_metrics)
self._log_scrape_start('Scraping typedefs...')
for typedef in self._apache_atlas_facade.get_typedefs():
self._scrape_classification_types(classifications_dict, typedef)
self._scrape_enum_types(enum_types_dict, typedef)
self._scrape_entity_types(entity_types_dict, typedef)
self.__metadata_enricher.enrich_entity_relationships(entity_types_dict)
return {
'classifications': classifications_dict,
'enum_types': enum_types_dict,
'entity_types': entity_types_dict
}, None
def _scrape_entity_types(self, entity_types_dict, typedef):
self._log_scrape_start('Scraping EntityTypes...')
for entity_type in typedef.entityDefs:
entity_type_name = entity_type.name
entity_type_dict = {
'name': entity_type_name,
'data': entity_type._data,
'superTypes': entity_type.superTypes,
'entities': {}
}
entities = self.__scrape_entity_type(entity_type)
entity_type_dict['entities'] = entities
entity_types_dict[entity_type_name] = entity_type_dict
def _scrape_classification_types(self, classifications_dict, typedef):
self._log_scrape_start('Scraping Classifications/Templates...')
for classification_type in typedef.classificationDefs:
classification_data = classification_type._data
logging.info('Classification: %s', classification_type.name)
logging.debug(classification_data)
classifications_dict[classification_type.name] = {
'name': classification_type.name,
'guid': classification_type.guid,
'data': classification_data
}
def _scrape_enum_types(self, enum_types_dict, typedef):
self._log_scrape_start('Scraping Enum types...')
for enum_type in typedef.enumDefs:
enum_data = enum_type._data
logging.info('Enum type: %s', enum_type.name)
logging.debug(enum_data)
enum_types_dict[enum_type.name] = {
'name': enum_type.name,
'guid': enum_type.guid,
'data': enum_data
}
def __scrape_entity_type(self, entity_type):
searched_entries = {}
entity_type_name = entity_type.name
logging.info('=> Entity Type: %s', entity_type_name)
logging.debug(entity_type._data)
search_results = self._apache_atlas_facade.\
search_entities_from_entity_type(entity_type_name)
guids = []
for entity in search_results:
# Collecting guids and storing entity to enricher data later on.
guid = entity.guid
guids.append(guid)
searched_entries[guid] = {'guid': guid, 'data': entity._data}
fetched_entities_dict = {}
if guids:
fetched_entities_dict = self._apache_atlas_facade.fetch_entities(
guids)
self.__metadata_enricher.enrich_entity_classifications(
fetched_entities_dict, searched_entries)
        logging.info('Entity Type: %s scraped!', entity_type_name)
logging.info('')
return fetched_entities_dict
@classmethod
def _log_scrape_start(cls, message, *args):
logging.info('')
logging.info(message, *args)
logging.info('-------------------------------------------------')
@classmethod
def _log_single_object_scrape_result(cls, the_object):
logging.info('Found!' if the_object else 'NOT found!')
| 37.385185
| 79
| 0.656628
| 563
| 5,047
| 5.520426
| 0.248668
| 0.074003
| 0.040541
| 0.040541
| 0.211068
| 0.070463
| 0.038932
| 0.027027
| 0
| 0
| 0
| 0.002118
| 0.251437
| 5,047
| 134
| 80
| 37.664179
| 0.82054
| 0.12443
| 0
| 0.065217
| 0
| 0
| 0.089257
| 0.017488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.021739
| 0
| 0.141304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcded4531d60ca947d6fb59affac50e25540dcfc
| 7,490
|
py
|
Python
|
aviary/roost/data.py
|
sxie22/aviary
|
74b87eee86067f69af6e5b86bd12fca2202c4de5
|
[
"MIT"
] | null | null | null |
aviary/roost/data.py
|
sxie22/aviary
|
74b87eee86067f69af6e5b86bd12fca2202c4de5
|
[
"MIT"
] | null | null | null |
aviary/roost/data.py
|
sxie22/aviary
|
74b87eee86067f69af6e5b86bd12fca2202c4de5
|
[
"MIT"
] | null | null | null |
import functools
import json
from os.path import abspath, dirname, exists, join
from typing import Dict, Sequence
import numpy as np
import pandas as pd
import torch
from pymatgen.core import Composition
from torch.utils.data import Dataset
class CompositionData(Dataset):
def __init__(
self,
df: pd.DataFrame,
task_dict: Dict[str, str],
elem_emb: str = "matscholar200",
inputs: Sequence[str] = ["composition"],
identifiers: Sequence[str] = ["material_id", "composition"],
):
"""Data class for Roost models.
Args:
df (pd.DataFrame): Pandas dataframe holding input and target values.
task_dict (dict[str, "regression" | "classification"]): Map from target names to task
type.
elem_emb (str, optional): One of "matscholar200", "cgcnn92", "megnet16", "onehot112" or
path to a file with custom embeddings. Defaults to "matscholar200".
inputs (list[str], optional): df column name holding material compositions.
Defaults to ["composition"].
identifiers (list, optional): df columns for distinguishing data points. Will be
copied over into the model's output CSV. Defaults to ["material_id", "composition"].
"""
assert len(identifiers) == 2, "Two identifiers are required"
        assert len(inputs) == 1, "Exactly one input column is required"
self.inputs = inputs
self.task_dict = task_dict
self.identifiers = identifiers
self.df = df
if elem_emb in ["matscholar200", "cgcnn92", "megnet16", "onehot112"]:
elem_emb = join(
dirname(abspath(__file__)), f"../embeddings/element/{elem_emb}.json"
)
else:
assert exists(elem_emb), f"{elem_emb} does not exist!"
with open(elem_emb) as f:
self.elem_features = json.load(f)
self.elem_emb_len = len(list(self.elem_features.values())[0])
self.n_targets = []
for target, task in self.task_dict.items():
if task == "regression":
self.n_targets.append(1)
elif task == "classification":
n_classes = np.max(self.df[target].values) + 1
self.n_targets.append(n_classes)
def __len__(self):
return len(self.df)
@functools.lru_cache(maxsize=None) # Cache data for faster training
def __getitem__(self, idx):
"""[summary]
Args:
idx (int): dataset index
Raises:
            AssertionError: if the composition contains element types not in the embedding
            ValueError: if the composition string cannot be parsed into elements
Returns:
atom_weights: torch.Tensor shape (M, 1)
weights of atoms in the material
atom_fea: torch.Tensor shape (M, n_fea)
features of atoms in the material
self_fea_idx: torch.Tensor shape (M*M, 1)
list of self indices
nbr_fea_idx: torch.Tensor shape (M*M, 1)
list of neighbor indices
target: torch.Tensor shape (1,)
target value for material
cry_id: torch.Tensor shape (1,)
input id for the material
"""
df_idx = self.df.iloc[idx]
composition = df_idx[self.inputs][0]
cry_ids = df_idx[self.identifiers].values
comp_dict = Composition(composition).get_el_amt_dict()
elements = list(comp_dict.keys())
weights = list(comp_dict.values())
weights = np.atleast_2d(weights).T / np.sum(weights)
try:
atom_fea = np.vstack([self.elem_features[element] for element in elements])
except AssertionError:
raise AssertionError(
f"cry-id {cry_ids[0]} [{composition}] contains element types not in embedding"
)
except ValueError:
raise ValueError(
f"cry-id {cry_ids[0]} [{composition}] composition cannot be parsed into elements"
)
nele = len(elements)
self_fea_idx = []
nbr_fea_idx = []
for i, _ in enumerate(elements):
self_fea_idx += [i] * nele
nbr_fea_idx += list(range(nele))
# convert all data to tensors
atom_weights = torch.Tensor(weights)
atom_fea = torch.Tensor(atom_fea)
self_fea_idx = torch.LongTensor(self_fea_idx)
nbr_fea_idx = torch.LongTensor(nbr_fea_idx)
targets = []
for target in self.task_dict:
if self.task_dict[target] == "regression":
targets.append(torch.Tensor([df_idx[target]]))
elif self.task_dict[target] == "classification":
targets.append(torch.LongTensor([df_idx[target]]))
return (
(atom_weights, atom_fea, self_fea_idx, nbr_fea_idx),
targets,
*cry_ids,
)
def collate_batch(dataset_list):
"""
Collate a list of data and return a batch for predicting crystal
properties.
Parameters
----------
dataset_list: list of tuples for each data point.
(atom_fea, nbr_fea, nbr_fea_idx, target)
atom_fea: torch.Tensor shape (n_i, atom_fea_len)
nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)
self_fea_idx: torch.LongTensor shape (n_i, M)
nbr_fea_idx: torch.LongTensor shape (n_i, M)
target: torch.Tensor shape (1, )
cif_id: str or int
Returns
-------
N = sum(n_i); N0 = sum(i)
batch_atom_weights: torch.Tensor shape (N, 1)
batch_atom_fea: torch.Tensor shape (N, orig_atom_fea_len)
Atom features from atom type
batch_self_fea_idx: torch.LongTensor shape (N, M)
Indices of mapping atom to copies of itself
batch_nbr_fea_idx: torch.LongTensor shape (N, M)
Indices of M neighbors of each atom
crystal_atom_idx: list of torch.LongTensor of length N0
Mapping from the crystal idx to atom idx
target: torch.Tensor shape (N, 1)
Target value for prediction
batch_comps: list
batch_ids: list
"""
# define the lists
batch_atom_weights = []
batch_atom_fea = []
batch_self_fea_idx = []
batch_nbr_fea_idx = []
crystal_atom_idx = []
batch_targets = []
batch_cry_ids = []
cry_base_idx = 0
for i, (inputs, target, *cry_ids) in enumerate(dataset_list):
atom_weights, atom_fea, self_fea_idx, nbr_fea_idx = inputs
# number of atoms for this crystal
n_i = atom_fea.shape[0]
# batch the features together
batch_atom_weights.append(atom_weights)
batch_atom_fea.append(atom_fea)
# mappings from bonds to atoms
batch_self_fea_idx.append(self_fea_idx + cry_base_idx)
batch_nbr_fea_idx.append(nbr_fea_idx + cry_base_idx)
# mapping from atoms to crystals
crystal_atom_idx.append(torch.tensor([i] * n_i))
# batch the targets and ids
batch_targets.append(target)
batch_cry_ids.append(cry_ids)
# increment the id counter
cry_base_idx += n_i
return (
(
torch.cat(batch_atom_weights, dim=0),
torch.cat(batch_atom_fea, dim=0),
torch.cat(batch_self_fea_idx, dim=0),
torch.cat(batch_nbr_fea_idx, dim=0),
torch.cat(crystal_atom_idx),
),
tuple(torch.stack(b_target, dim=0) for b_target in zip(*batch_targets)),
*zip(*batch_cry_ids),
)
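# Typical wiring (illustrative): hand this collate function to a DataLoader:
#   loader = torch.utils.data.DataLoader(dataset, batch_size=128, collate_fn=collate_batch)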
| 33.738739
| 100
| 0.607076
| 960
| 7,490
| 4.517708
| 0.214583
| 0.037353
| 0.029052
| 0.029052
| 0.198063
| 0.110906
| 0.076089
| 0.061794
| 0.048421
| 0.031358
| 0
| 0.009901
| 0.298798
| 7,490
| 221
| 101
| 33.891403
| 0.81588
| 0.338451
| 0
| 0.017699
| 0
| 0
| 0.090372
| 0.008096
| 0
| 0
| 0
| 0
| 0.044248
| 1
| 0.035398
| false
| 0
| 0.079646
| 0.00885
| 0.150442
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bce1c979a2eb7695c7ea999525e47a17d52983b8
| 68,069
|
py
|
Python
|
symblic_game/NEW_GAME.py
|
zishanqin/Symbolic-transfer
|
b553f188ad3f6c6492fcff556ac6f597e56cf43e
|
[
"MIT"
] | 3
|
2021-07-28T11:28:25.000Z
|
2021-07-28T11:56:58.000Z
|
symblic_game/NEW_GAME.py
|
zishanqin/Symbolic-transfer
|
b553f188ad3f6c6492fcff556ac6f597e56cf43e
|
[
"MIT"
] | null | null | null |
symblic_game/NEW_GAME.py
|
zishanqin/Symbolic-transfer
|
b553f188ad3f6c6492fcff556ac6f597e56cf43e
|
[
"MIT"
] | 1
|
2021-07-28T11:40:45.000Z
|
2021-07-28T11:40:45.000Z
|
# Author: Aimore Resende Riquetti Dutra
# email: aimorerrd@hotmail.com
# -------------------------------------------------------------------------------------------------- #
# This code can run 4 different models of Reinforcement Learning:
# Q-Learning (QL), DQN, SRL (DSRL), SRL+CS(DSRL_object_near) and some other variations of SRL
# Settings for each run can be chosen at the end of the code
# It can load and save the models in Excel form
# There are some pre-defined environments, but you can create your own
# Press G to get intermediate Graphs and P to stop
# -------------------------------------------------------------------------------------------------- #
import Class
import pprint
import random
import sys
import numpy as np
import pygame
# from pyglet import clock
import pandas as pd
import time
import json
from time import sleep
import math
import matplotlib.pyplot as plt
import os
import glob
## Comment this part if not using DQN model:
# import keras
# from keras.models import Sequential
# from keras.layers import Dense, Activation, Flatten
# from keras.models import model_from_json
# from keras.optimizers import sgd
# from keras.utils import plot_model
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
# set_session(tf.Session(config=config))
# ------ environments ------
# region COLOR DEFINITION
explore_set = set()
explore_dict = dict()
white = (255, 255, 255)
black = (0, 0, 0)
grey = (80, 80, 80)
red = (255, 0, 0)
blue = (0, 0, 255)
green = (0, 255, 0)
yellow = (250, 250, 0)
pink = (250, 105, 180)
# endregion
# region PANDAS DEFINITION
pd.set_option('display.max_columns', None)
pd.set_option('display.large_repr', 'info')
desired_width = 180
pd.set_option('display.width', desired_width)
pd.set_option('precision', 4)
# endregion
np.random.seed(123) # For reproducibility
pygame.init() # Pygame initialization
pp = pprint.PrettyPrinter(indent=4)
actions = ['up', 'down', 'right', 'left']
actions_dict = {'up':0, 'down':1, 'right':2, 'left':3}
p_keys = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d]
# clock.tick(20)
def pop(self):
'''Removes a layer instance on top of the layer stack.
'''
while self.outputs:
self.layers.pop()
if not self.layers:
self.outputs = []
self.inbound_nodes = []
self.outbound_nodes = []
else:
self.layers[-1].outbound_nodes = []
self.outputs = [self.layers[-1].output]
self.built = False
# region REWARDS
negative_reward = 5 # Negative Reward
positive_reward = 1 # Positive Reward
step_reward = 0 # Reward received by each step
# endregion
# ------ environments configuration (till line 640) ------
# region TEXT FONTS DEFINITION
smallfont = pygame.font.SysFont('comicsansms', 13)
smallfont_act = pygame.font.SysFont('arial', 13)
mediumfont_act = pygame.font.SysFont('arial', 18, bold=True)
pygame.font.init()
# endregion
# region DISPLAY FUNCTIONS
def show_Alg(alg, screen):
text = smallfont.render("Alg: " + alg, True, black)
screen.blit(text, [5 + 90 * 0, 0])
def show_Samples(sample, screen):
text = smallfont.render("Sample: " + str(sample), True, black)
screen.blit(text, [60+100*1, 0])
def show_Level(level, screen):
text = smallfont.render("Episode: " + str(level), True, black)
screen.blit(text, [50+100*2, 0])
def show_Score(score, screen):
text = smallfont.render("Score: " + str(score), True, black)
screen.blit(text, [50+100*3, 0])
def show_Steps(steps, screen):
text = smallfont.render("Steps: " + str(steps), True, black)
screen.blit(text, [50+100*4, 0])
def show_Percent(percent, screen):
text = smallfont.render("Percent: " + str(['%.2f' % elem for elem in percent]), True, black)
screen.blit(text, [5, 30 * 4])
def show_Steps_list(steps_list, screen):
text = smallfont.render("Steps_list: " + str(steps_list), True, black)
screen.blit(text, [5, 30 * 1])
def show_Act_List(act_list, screen):
text = smallfont_act.render("act_list: " + str(act_list), True, black)
screen.blit(text, [5, 30 * 2])
def show_Action(act, screen):
text = smallfont_act.render("Chosen Action: " + act, True, black)
screen.blit(text, [5, 30 * 3])
def show_Env(env, screen):
text = mediumfont_act.render("Environment: " + str(env), True, black)
screen.blit(text, [50, 30 * 5])
# endregion
# region CREATE OBJ_LIST FROM STATE AND RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS
''' CREATE obj_list - FROM env '''
def create_obj_list(env):
obj_list_fun = []
tp_list = []
loc_list = []
env = env.transpose()
h_max = env.shape[0]
# print("h_max", h_max)
v_max = env.shape[1]
# print("v_max",v_max)
for h in range(1, (h_max - 1)):
for v in range(1, (v_max - 1)):
if env[h][v] != 0:
tp_list.append(env[h][v])
loc_list.append((h, v))
for i in range(len(loc_list)):
tp = tp_list[i]
loc = loc_list[i]
obj = Class.Obj(tp, loc)
obj_list_fun.append(obj)
return obj_list_fun
''' CREATE A RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS - FROM obj_list '''
def relation_obj_list(obj_list, agent_pos):
rel_list = []
xA = agent_pos[0]
yA = agent_pos[1]
# print("xA", xA)
# print("yA", yA)
for obj in obj_list:
xB = obj.loc[0]
yB = obj.loc[1]
x = xA - xB
y = yA - yB
loc_dif = (x, y)
# loc_dif = (x[0], y[0])
tp = obj.tp
obj = Class.Obj(tp, loc_dif)
rel_list.append(obj)
return rel_list
# endregion
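# Illustrative example of the relative encoding above (hypothetical values):
# with the agent at (3, 2) and an object of type 60 at (4, 1), relation_obj_list
# stores Obj(60, (3 - 4, 2 - 1)) == Obj(60, (-1, 1)), i.e. agent_pos - obj_pos
# per axis, so identical spatial relations map to the same entry regardless of
# the agent's absolute position.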
# region DRAW OBJECTS
x_zero_screen = 50
y_zero_screen = 180
size_obj = 37
def draw_objects(agent, positivo_list, negativo_list, wall_list, screen):
# Class.Grid.draw_grid(screen) # Uncomment to display a Grid
for i in positivo_list: # POSITIVO
screen.blit(i.icon, (i.pos[0] * size_obj + x_zero_screen, y_zero_screen + i.pos[1] * size_obj))
for i in negativo_list: # NEGATIVO
screen.blit(i.icon, (i.pos[0] * size_obj + x_zero_screen, y_zero_screen + i.pos[1] * size_obj))
screen.blit(agent.icon, (agent.pos[0] * size_obj + x_zero_screen, y_zero_screen + agent.pos[1] * size_obj)) # AGENT
for i in wall_list: # WALL
screen.blit(i.icon, (i.pos[0] * size_obj + x_zero_screen, y_zero_screen + i.pos[1] * size_obj))
# endregion
# region CREATE THE STATE FROM THE ENVIRONMENT
def update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list):
# state is defined in terms of symbols not pixels...
state = np.zeros((v_max, h_max)).astype(np.int16)
for i in positivo_list:
state[i.pos[1]][i.pos[0]] = 60 # SYMBOL 60 POSITIVE
for i in negativo_list:
state[i.pos[1]][i.pos[0]] = 180 # SYMBOL 180 NEGATIVE
for i in wall_list:
state[i.pos[1]][i.pos[0]] = 255 # SYMBOL 255
# state[agent.pos[1]][agent.pos[0]] = 120 # SYMBOL 60
return state
# TODO: check whether v_max and h_max really have to be redeclared every time
# endregion
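# Illustrative example of the symbol encoding above, assuming environment 12
# (v_max=3, h_max=5) with a negative object at (1, 1) and a positive one at
# (3, 1); the border cells come from wall_list and the agent cell is left at 0:
#   [[255, 255, 255, 255, 255],
#    [255, 180,   0,  60, 255],
#    [255, 255, 255, 255, 255]]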
# region ENVIRONMENT CONFIGURATION
def environment_conf(s_env):
if s_env == 1:
v_max = 4
h_max = 5
x_agent = 1
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 1, 0]])
m_posi = np.matrix([[0, 1, 0],
[0, 0, 0]])
elif s_env == 2:
v_max = 4
h_max = 5
x_agent = 1
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 0, 1]])
m_posi = np.matrix([[0, 0, 1],
[0, 0, 0]])
elif s_env == 3:
v_max = 4
h_max = 5
x_agent = 1
y_agent = 2
m_nega = np.matrix([[1, 0, 0],
[0, 0, 0]])
m_posi = np.matrix([[0, 1, 0],
[0, 0, 0]])
elif s_env == 4:
v_max = 4
h_max = 4
x_agent = 1
y_agent = 1
m_nega = np.matrix([[0, 0],
[0, 0]])
m_posi = np.matrix([[0, 0],
[0, 1]])
elif s_env == 5:
v_max = 5
h_max = 5
x_agent = 2
y_agent = 2
m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
while (True):
x = random.randrange(0, h_max - 2)
y = random.randrange(0, v_max - 2)
if x != x_agent-1 or y != y_agent-1:
element = (x, y)
break
m_posi[element] = 1
elif s_env == 6:
v_max = 7
h_max = 7
x_agent = 3
y_agent = 3
m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
while (True):
x = random.randrange(0, h_max - 2)
y = random.randrange(0, v_max - 2)
if x != x_agent - 1 or y != y_agent - 1:
element = (x, y)
break
m_posi[element] = 1
elif s_env == 7:
v_max = 9
h_max = 9
x_agent = 4
y_agent = 4
m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
while (True):
x = random.randrange(0, h_max - 2)
y = random.randrange(0, v_max - 2)
if x != x_agent - 1 or y != y_agent - 1:
element = (x, y)
break
m_posi[element] = 1
elif s_env == 8:
v_max = 5
h_max = 5
x_agent = 2
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 0, 0],
[1, 0, 1]])
m_posi = np.matrix([[1, 0, 1],
[0, 0, 0],
[0, 0, 0]])
elif s_env == 9:
v_max = 5
h_max = 5
x_agent = 2
y_agent = 2
m_nega = np.matrix([[1, 0, 0],
[0, 0, 0],
[0, 0, 1]])
m_posi = np.matrix([[0, 0, 1],
[0, 0, 0],
[1, 0, 0]])
elif s_env == 10:
v_max = 9
h_max = 9
x_agent = 4
y_agent = 4
m_nega = np.matrix([[1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1]])
m_posi = np.matrix([[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0]])
elif s_env == 11:
v_max = 9
h_max = 9
x_agent = 4
y_agent = 4
element_list = []
for n in range(14):
while(True):
x = random.randrange(0,7)
y = random.randrange(0,7)
if (x != 3 or y != 3) and (x, y) not in element_list: # skip the agent's cell (3, 3) and occupied cells
element = (x, y)
break
element_list.append(element)
m_nega = np.zeros(shape=(v_max-2, h_max-2))
m_posi = np.zeros(shape=(v_max-2, h_max-2))
half = len(element_list) / 2
nega_list = element_list[:int(half)]
posi_list = element_list[int(half):]
for ele in nega_list:
m_nega[ele] = 1
for ele in posi_list:
m_posi[ele] = 1
elif s_env == 12:
v_max = 3
h_max = 5
x_agent = 2
y_agent = 1
m_nega = np.matrix([1, 0, 0])
m_posi = np.matrix([0, 0, 1])
elif s_env == 13:
v_max = 3
h_max = 5
x_agent = 2
y_agent = 1
m_nega = np.matrix([0, 0, 0])
m_posi = np.matrix([1, 0, 1])
elif s_env == 14:
v_max = 3
h_max = 6
x_agent = 2
y_agent = 1
m_nega = np.matrix([1, 0, 0, 0])
m_posi = np.matrix([0, 0, 0, 1])
elif s_env == 15:
v_max = 3
h_max = 6
x_agent = 2
y_agent = 1
m_nega = np.matrix([0, 0, 0, 0])
m_posi = np.matrix([1, 0, 0, 1])
elif s_env == 16:
v_max = 3
h_max = 7
x_agent = 3
y_agent = 1
m_nega = np.matrix([1, 0, 0, 0, 0])
m_posi = np.matrix([0, 0, 0, 0, 1])
elif s_env == 17:
v_max = 3
h_max = 7
x_agent = 3
y_agent = 1
m_nega = np.matrix([0, 0, 0, 0, 0])
m_posi = np.matrix([1, 0, 0, 0, 1])
elif s_env == 18:
v_max = 3
h_max = 9
x_agent = 4
y_agent = 1
m_nega = np.matrix([1, 0, 0, 0, 0, 0, 0])
m_posi = np.matrix([0, 0, 0, 0, 0, 0, 1])
elif s_env == 19:
v_max = 3
h_max = 9
x_agent = 4
y_agent = 1
m_nega = np.matrix([0, 0, 0, 0, 0, 0, 0])
m_posi = np.matrix([1, 0, 0, 0, 0, 0, 1])
elif s_env == 20:
v_max = 5
h_max = 5
x_agent = 2
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
m_posi = np.matrix([[1, 0, 1],
[0, 0, 0],
[0, 1, 0]])
elif s_env == 21:
v_max = 5
h_max = 5
x_agent = 2
y_agent = 2
m_nega = np.matrix([[0, 1, 0],
[0, 0, 0],
[1, 0, 1]])
m_posi = np.matrix([[1, 0, 1],
[0, 0, 0],
[0, 1, 0]])
elif s_env == 22:
v_max = 5
h_max = 5
x_agent = 2
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
m_posi = np.matrix([[1, 0, 1],
[0, 0, 0],
[1, 0, 1]])
elif s_env == 31:
v_max = 5
h_max = 5
x_agent = 1
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
m_posi = np.matrix([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
elif s_env == 32:
v_max = 5
h_max = 5
x_agent = 1
y_agent = 2
m_nega = np.matrix([[0, 0, 0],
[0, 0, 1],
[0, 0, 0]])
m_posi = np.matrix([[0, 0, 1],
[0, 0, 0],
[0, 0, 0]])
elif s_env == 33:
v_max = 5
h_max = 5
x_agent = 1
y_agent = 2
m_nega = np.matrix([[1, 0, 0],
[0, 0, 0],
[0, 0, 0]])
m_posi = np.matrix([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
else:
pass
"INSTANCE THE wall_list"
wall_list = []
for y in range(v_max):
for x in range(h_max):
if y == v_max - 1 or y == 0 or x == h_max - 1 or x == 0:
wall = Class.Wall('wall', x, y)
wall_list.append(wall)
"INSTANCE THE AGENT"
agent = Class.Agent('agent', x_agent, y_agent)
"INSTANCE POSITIVE OBJECTS"
positivo_list = []
for x in range(m_posi.shape[0]):
for y in range(m_posi.shape[1]):
if m_posi[x, y] == 1:
positivo = Class.Positivo('positivo', y + 1, x + 1)
positivo_list.append(positivo)
"INSTANCE NEGATIVE OBJECTS"
negativo_list = []
for x in range(m_nega.shape[0]):
for y in range(m_nega.shape[1]):
if m_nega[x, y] == 1:
negativo = Class.Negativo('negativo', y + 1, x + 1)
negativo_list.append(negativo)
return negativo_list, positivo_list, agent, wall_list, h_max, v_max
# endregion
# region SAVE - LOAD - CREATE
def save_model(model, path):
model.save_weights(path + ".h5", overwrite=True)
with open(path + ".json", "w") as outfile:
json.dump(model.to_json(), outfile)
def load_model(s_alg, path):
optimizer_config = []
print(path)
if s_alg == "QL":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model")
elif s_alg == "DSRL":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1])
elif s_alg == "DSRL_dist":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1])
elif s_alg == "DSRL_dist_type":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1])
elif s_alg == "DSRL_dist_type_near":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1])
elif s_alg == "DSRL_dist_type_near_propNeg":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1])
elif s_alg == "DSRL_object_near":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0,1])
elif s_alg == "DSRL_object":
path = path + ".xlsx"
model = pd.read_excel(path, sheetname="model", header=[0], index_col=[0, 1])
elif s_alg == "DQN":
with open(path + ".json", "r") as jfile:
model = model_from_json(json.load(jfile))
model.load_weights(path + ".h5")
conf = pd.read_excel(path + ".xlsx", sheetname="Run_Conf", header=[0])
# net_conf = conf.loc[[16:20],:]
# print("net_conf", net_conf)
optimizer = conf.loc[19, "A"]
print("op_conf ", optimizer)
# pd.Series({'N_actions': net_conf["N_actions"]}),
# pd.Series({'Max_memory': net_conf["Max_memory"]}),
# pd.Series({'Hidden_size': net_conf["Hidden_size"]}),
# pd.Series({'Batch_size': net_conf["Batch_size"]}),
# pd.Series({'Optimizer': net_conf["Optimizer"]}),
# pd.Series({'lr': op_conf[0]}),
# pd.Series({'beta_1': op_conf[1]}),
# pd.Series({'beta_2': op_conf[2]}),
# pd.Series({'epsilon': op_conf[3]}),
# pd.Series({'decay': op_conf[4]}),
# pd.Series({'rho': op_conf[5]})
use_optimizer, optimizer_config = define_optimizer(optimizer)
model.compile(loss='mse', optimizer=use_optimizer)
model.summary()
# pass
return model, optimizer_config
def create_model(s_alg, state_shape, net_conf):
optimizer_config = []
if s_alg == "QL":
model = pd.DataFrame()
model.index.name = ["States", "Action"]
elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
m_index = pd.MultiIndex(levels=[[''], [""]],
labels=[[], []],
names=['state', 'actions'])
model = pd.DataFrame(index=m_index)
elif s_alg == "DQN":
model = Sequential()
pop(model) # clear any layers left over from a previous graph (see pop() above)
model = Sequential()
model.add(Dense(net_conf["Hidden_size"],
input_dim=state_shape[0]*state_shape[1],
activation="relu",
name="DENSE_1"))
model.add(Dense(net_conf["Hidden_size"],
activation='relu',
name="DENSE_2"))
model.add(Dense(net_conf["N_actions"],
name="DENSE_3"))
use_optimizer, optimizer_config = define_optimizer(net_conf["Optimizer"])
model.compile(loss='mse', optimizer=use_optimizer)
print(model.summary())
# plot_model(model, to_file='model.png')
# d3v.d3viz(model.get_output(), 'test.html')
return model, optimizer_config
# endregion
# ------ RL algorithms ------
# region DQN - CONFIGURATIONS
class ExperienceReplay(object):
"""
During gameplay all the experiences < s, a, r, s’ > are stored in a replay memory.
In training, batches of randomly drawn experiences are used to generate the input and target for training.
"""
def __init__(self, max_memory=100, discount=.9):
"""
Setup
max_memory: the maximum number of experiences we want to store
memory: a list of experiences
discount: the discount factor for future experience
In the memory, whether the game ended at a given state is stored separately in a nested array:
[...
[experience, game_over]
[experience, game_over]
...]
"""
self.max_memory = max_memory
self.memory = list()
self.discount = discount
def remember(self, states, game_over):
# Save a state to memory
self.memory.append([states, game_over])
# We don't want to store infinite memories, so if we have too many, we just delete the oldest one
if len(self.memory) > self.max_memory:
del self.memory[0]
# print(">>> states:", states)
def get_batch(self, model, batch_size=10):
# How many experiences do we have?
len_memory = len(self.memory)
# Calculate the number of actions that can possibly be taken in the game
num_actions = model.output_shape[-1]
# Dimensions of the game field
env_dim = self.memory[0][0][0].shape[1]
# We want to return an input and target vector with inputs from an observed state...
inputs = np.zeros((min(len_memory, batch_size), env_dim))
# ...and the target r + gamma * max Q(s’,a’)
# Note that our target is a matrix, with entries not only for the action taken but also for the other possible actions.
# Actions that were not taken keep the same value as the prediction, so training does not affect them.
targets = np.zeros((inputs.shape[0], num_actions))
# We draw states to learn from randomly
for i, idx in enumerate(np.random.randint(0, len_memory, size=inputs.shape[0])):
"""
Here we load one transition <s, a, r, s’> from memory
state_t: initial state s
action_t: action taken a
reward_t: reward earned r
state_tp1: the state that followed s’
"""
state_t, action_t, reward_t, state_tp1 = self.memory[idx][0]
# We also need to know whether the game ended at this state
game_over = self.memory[idx][1]
inputs[i:i + 1] = state_t
# First we fill the target values with the predictions of the model.
# They will not be affected by training (since the training loss for them is 0)
targets[i] = model.predict(state_t)[0]
# print("targets\n", targets)
# print("action_t", action_t)
"""
If the game ended, the expected reward Q(s,a) should be the final reward r.
Otherwise the target value is r + gamma * max Q(s’,a’)
"""
# Here Q_sa is max_a'Q(s', a')
Q_sa = np.max(model.predict(state_tp1)[0])
# if the game ended, the reward is the final reward
if game_over: # if game_over is True
targets[i, action_t] = reward_t
else:
# r + gamma * max Q(s’,a’)
targets[i, action_t] = reward_t + self.discount * Q_sa
return inputs, targets
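# Usage sketch for ExperienceReplay (illustrative only; never called in this
# file). `model_` is a hypothetical compiled Keras net with a flattened 5x5
# input and 4 outputs, matching the shapes used by the DQN branch below.
def _experience_replay_example(model_):
    exp = ExperienceReplay(max_memory=100, discount=0.9)
    s_t = np.zeros((1, 25))   # flattened old state
    s_t1 = np.zeros((1, 25))  # flattened new state
    exp.remember([s_t, 0, 1.0, s_t1], False)  # [state, action, reward, next_state], game_over
    inputs, targets = exp.get_batch(model_, batch_size=10)
    return model_.train_on_batch(inputs, targets)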
def define_optimizer(s_optimizer):
lr = 0
beta_1 = 0
beta_2 = 0
epsilon = 0
decay = 0
rho = 0
if s_optimizer == "adam":
lr = 0.001 # 0.001
beta_1 = 0.9 # 0.9
beta_2 = 0.999 # 0.999
epsilon = 1e-08 # 1e-08
decay = 0.0 # 0.0
optimizer_selected = keras.optimizers.Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay)
elif s_optimizer == "rms_opt":
lr = 0.001 # 0.001
rho = 0.9 # 0.9
epsilon = 1e-08 # e-08
decay = 0.0 # 0.0
optimizer_selected = keras.optimizers.RMSprop(lr=lr, rho=rho, epsilon=epsilon, decay=decay)
optimizer_config = [lr, beta_1, beta_2, epsilon, decay, rho]
return optimizer_selected, optimizer_config
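# Example (illustrative): selecting Adam with the defaults above returns the
# optimizer plus its configuration in a fixed order:
#   use_opt, cfg = define_optimizer("adam")
#   # cfg == [lr, beta_1, beta_2, epsilon, decay, rho] == [0.001, 0.9, 0.999, 1e-08, 0.0, 0]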
#
def choose_action(s_alg, state, agent_pos, model, s_prob, step):
# print("\nPREVIOUS MODEL - CHOOSE ACTION\n", model)
zero = False
if s_alg == "QL":
state[agent_pos[1]][agent_pos[0]] = 120
s = str(state)
if s not in model.index:
indices = [np.array([s, s, s, s]), np.array(['up', 'down', 'right', 'left'])]
df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices)
model = model.append(df_zero)
model = model.fillna(0)
n_action = np.argmax(model.loc[s][0]) # Choose the max argument
if max(model.loc[s][0]) == 0: zero = True
elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
a_v_list = []
d = {}
obj_list = create_obj_list(state)
rel_list = relation_obj_list(obj_list, agent_pos)
new_state = rel_list
for obj in new_state: # FOR ALL OBJECTS SEEN
tp_n_c = str(obj.tp) # GET THE TYPE FROM THE NEW STATE
s_n_c = str(obj.loc) # GET THE LOCATION FROM THE NEW STATE
if tp_n_c not in model.columns:
# print("tp_n_c not in model.columns", tp_n_c)
model[tp_n_c] = 0
if s_n_c not in model.index:
# print("s_n_c not in model.index", s_n_c)
m_index = pd.MultiIndex(levels=[[s_n_c], actions],
labels=[[0, 0, 0, 0], [0, 1, 2, 3]],
names=['state', 'actions'])
df_zero = pd.DataFrame(index=m_index)
model = model.append(df_zero)
model = model.fillna(0)
Qts_a = model[tp_n_c].loc[s_n_c]
# print("Qts_a - ", Qts_a)
global explore_dict
if s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near": # Calculate the distance
s_n_c_abs = [int(s) for s in s_n_c if s.isdigit()] # s_n_c_abs = state_new_absolute_distance
distance = np.sqrt(s_n_c_abs[0]**2 + s_n_c_abs[1]**2)
# print("distance",distance)
Qts_a = Qts_a.divide(distance*distance, axis=0)
a_v = []
for action, value in Qts_a.items():
pos_x = agent_pos[0]
pos_y = agent_pos[1]
if action == 'up':
pos_y-=1
elif action =="down":
pos_y+=1
elif action =="right":
pos_x +=1
else:
pos_x -=1
if (pos_x, pos_y) in explore_dict:
a_v.append((action, value-0.1*explore_dict[(pos_x, pos_y)]))
else:
a_v.append((action, value))
# a_v = [(value, key) for value, key in Qts_a.items()]
# print("Qts_a - NEW", Qts_a)
a_v_list.append(a_v) # Append Q-value
# print(a_v_list)
# Sum the values of all Qs into a single Q
for element in a_v_list:
for a in element:
act = a[0] # Action
val = a[1] # Value
d[act] = d.get(act, 0) + val # Sum values for each Q
# print('a_v_list: (List of the action values for each object in the scene): ')
# print('{0}'.format(a_v_list))
# print('\nd: (The sum of all object`s action values )')
# pp.pprint(d)
if d != {}: # BE CAREFUL THIS IS A DICT (argmax does not work as usual)
inverse = [(value, key) for key, value in d.items()] # Invert to (value, key) pairs
n_action = max(inverse)[1] # Choose the action with the maximum summed value
if max(d.values()) == 0: zero = True
else:
# n_action = "down"
n_action = random.choice(actions)
elif s_alg == "DQN":
state[agent_pos[1]][agent_pos[0]] = 120
state = state.reshape((1, -1))
q = model.predict(state)
n_act = np.argmax(q[0])
n_action = actions[n_act]
if max(q[0]) == 0: zero = True
x = random.random() # E greedy exploration
# if x < s_prob:
if step < 5 or x < s_prob:
n_action = random.choice(actions)
print_action = 'Random Act (Prob):'
elif zero == True:
# n_action = random.choice(actions)
print_action = 'NOT Random Act (Zero):'
pass
else:
print_action = 'Chosen Act:'
# print("\nNEW MODEL - CHOOSE ACTION\n", model)
# explore_set.add(tuple(agent_pos))
return n_action, model, print_action
alfa = 1 # Learning Rate
gamma = 0.9 # Temporal Discount Factor
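# The tabular branches of learn() below all implement variants of the standard
# Q-learning update:
#   Q(s, a) <- Q(s, a) + alfa * (reward + gamma * max_a' Q(s', a') - Q(s, a))
# With alfa = 1 as set above, this reduces to the full replacement
#   Q(s, a) <- reward + gamma * max_a' Q(s', a')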
def learn(s_alg, model, state_t, state_t1, agent_t_pos, agent_t1_pos, reward, action_t, end_game, net_conf, exp_replay):
# print("\nPREVIOUS MODEL - LEARN\n", model)
batch_loss = 0
if s_alg == "QL":
state_t[agent_t_pos[1]][agent_t_pos[0]] = 120
state_t1[agent_t1_pos[1]][agent_t1_pos[0]] = 120
s_t = str(state_t)
s_t1 = str(state_t1)
if s_t1 not in model.index:
indices = [np.array([s_t1, s_t1, s_t1, s_t1]), np.array(['up', 'down', 'right', 'left'])]
df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices)
model = model.append(df_zero)
if s_t not in model.index:
indices = [np.array([s_t, s_t, s_t, s_t]), np.array(['up', 'down', 'right', 'left'])]
df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices)
model = model.append(df_zero)
model = model.fillna(0)
if end_game == False:
max_value = max(model.loc[s_t1][0]) # max(df.loc[new_state][0])
Q_value = model.loc[s_t, action_t][0]
updated_model = Q_value + alfa * (reward + (gamma * (max_value)) - Q_value)
else:
updated_model = reward
model.loc[s_t, action_t] = updated_model
elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
max_value = 0
obj_list = create_obj_list(state_t)
rel_list = relation_obj_list(obj_list, agent_t_pos)
old_state = rel_list
obj_list = create_obj_list(state_t1)
rel_list = relation_obj_list(obj_list, agent_t1_pos)
new_state = rel_list
for i in range(len(old_state)):
# Check all items in old state
obj_prev = old_state[i]
tp_prev = str(obj_prev.tp)
s_prev = str(obj_prev.loc)
# Check all items in new state
obj_new = new_state[i]
tp_new = str(obj_new.tp)
s_new = str(obj_new.loc)
if tp_new not in model.columns: # If type is new, then add type
model[tp_new] = 0
if s_new not in model.index: # If state is new, then add state
m_index = pd.MultiIndex(levels=[[s_new], actions],
labels=[[0, 0, 0, 0], [0, 1, 2, 3]],
names=['state', 'actions'])
df_zero = pd.DataFrame(index=m_index)
model = model.append(df_zero)
model = model.fillna(0)
max_value = max(model[tp_new].loc[s_new])
if s_alg == "DSRL": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif s_alg == "DSRL_dist": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL
if reward != 0:
s_p_c = [int(s) for s in s_prev if s.isdigit()]
if s_p_c[0] < 2 and s_p_c[1] < 2:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
else:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL
max_value_positive = max(model[tp_new].loc[s_new])
if reward != 0:
s_p_c = [int(s) for s in s_prev if s.isdigit()] # s_p_c = state_previous_absolute_distance
if s_p_c[0] < 2 and s_p_c[1] < 2: # IF IT IS CLOSE BY, THEN UPDATE ONLY THE CLOSE ONE:
if reward < 0 and tp_new == "180": # IF REWARD IS NEGATIVE and NEW OBJECT IS NEGATIVE UPDATE ONLY NEGATIVE TYPE:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif reward > 0 and tp_new == "60": # IF REWARD IS POSITIVE and NEW OBJECT IS POSITIVE UPDATE ONLY POSITIVE TYPE:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
# IF reward is zero
else:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
if tp_prev == "180": # IF THE PREVIOUS OBJECT WAS NEGATIVE
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
elif tp_prev == "60": # IF THE PREVIOUS OBJECT WAS POSITIVE
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif s_alg == "DSRL_dist_type_near_propNeg": # I try to solve this with max and min, but it did not work very well(THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL)
max_value_positive = max(model[tp_new].loc[s_new])
min_value_negative = min(model[tp_new].loc[s_new])
if reward != 0:
s_p_c = [int(s) for s in s_prev if s.isdigit()] # s_p_c = state_previous_absolute_distance
if s_p_c[0] < 2 and s_p_c[1] < 2: # IF IT IS CLOSE BY, THEN UPDATE ONLY THE CLOSE ONE:
if reward < 0 and tp_new == "180": # IF REWARD IS NEGATIVE and NEW OBJECT IS NEGATIVE UPDATE ONLY NEGATIVE TYPE:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * min_value_negative) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif reward > 0 and tp_new == "60": # IF REWARD IS POSITIVE and NEW OBJECT IS POSITIVE UPDATE ONLY POSITIVE TYPE:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
# IF reward is zero
else:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
if tp_prev == "180": # IF THE PREVIOUS OBJECT WAS NEGATIVE
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * min_value_negative) - Q_v)
elif tp_prev == "60": # IF THE PREVIOUS OBJECT WAS POSITIVE
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
max_value_positive = max(model[tp_new].loc[s_new])
# Find the object that the agent interacted with:
# the agent has to identify which object it just interacted with and then
# assign the value to that object. That is, find the type and the state of
# the object that is now at x=0, y=0 relative to the agent.
# print("obj_new.loc[0]\n", obj_new.loc[0])
# print("obj_new.loc[1]\n", obj_new.loc[1])
# print("action_t\n", action_t)
# print("s_prev\n", s_prev)
if obj_new.loc[0] == 0 and obj_new.loc[1] == 0:
tp_to_update = tp_new
# print("tp_new\n", tp_new)
if action_t == "up":
s_prev_to_update = str((0,1))
elif action_t == "down":
s_prev_to_update = str((0,-1))
elif action_t == "right":
s_prev_to_update = str((-1,0))
elif action_t == "left":
s_prev_to_update = str((1,0))
# print("s_prev_to_update\n", s_prev_to_update)
if end_game == False:
Q_v = model[tp_to_update].loc[s_prev_to_update, action_t]
model[tp_to_update].loc[s_prev_to_update, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_to_update].loc[s_prev_to_update, action_t] = reward
if reward == 0:
if end_game == False:
Q_v = model[tp_prev].loc[s_prev, action_t]
model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
else:
model[tp_prev].loc[s_prev, action_t] = reward
elif s_alg == "DQN":
state_t[agent_t_pos[1]][agent_t_pos[0]] = 120
state_t1[agent_t1_pos[1]][agent_t1_pos[0]] = 120
state_t = state_t.reshape((1, -1))
state_t1 = state_t1.reshape((1, -1))
action_t = actions_dict[action_t]
exp_replay.remember([state_t, action_t, reward, state_t1], end_game) # [old_state, old_action, reward, new_state]
inputs, targets = exp_replay.get_batch(model, batch_size=net_conf["Batch_size"])
batch_loss = model.train_on_batch(inputs, targets)
# print("\nNEW MODEL - LEARN\n", model)
return model, batch_loss, exp_replay
''' PROGRAM START '''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def run(s_env, s_alg, s_learn, s_load, s_print, s_auto, s_episode, s_cond_to_end, s_server, s_net_comb_param, s_load_path, s_prob, s_sample, s_save):
net_conf = {"N_actions": n_actions,
"Max_memory": max_memory_list[s_net_comb_param],
"Hidden_size": hidden_size_list[s_net_comb_param],
"Batch_size": batch_size_list[s_net_comb_param],
"Optimizer": optimizer_list[0]}
exp_replay = ExperienceReplay(max_memory=net_conf["Max_memory"])
begin = time.time()
begin_time = time.strftime('%X %x')
print("\n\n --- BEGINING --- s_sample: %s \n begin_time: %s \n" % (s_sample, begin_time))
df_score = pd.DataFrame()
df_percent_list = pd.DataFrame()
df_loss_list = pd.DataFrame()
df_time_sample = pd.DataFrame()
avg_last_score_list = []
if s_server == False: screen = pygame.display.set_mode((400 + 37 * 5, 330 + 37 * 5))
score_list_best = [0]
for sample in list(range(1, s_sample+1)):
experiment_configurations = (sample, s_env, s_alg, s_episode, s_learn, s_load, s_print, s_auto, s_cond_to_end, s_server, s_net_comb_param, s_prob)
print("\n - START - "
"\n sample: %s"
"\n s_env: %s"
"\n s_alg: %s"
"\n s_episode: %s"
"\n s_learn: %s"
"\n s_load: %s"
"\n s_print: %s"
"\n s_auto: %s"
"\n s_cond_to_end: %s"
"\n s_server: %s"
"\n s_net_comb_param: %s"
"\n s_prob: %s" % experiment_configurations)
start = time.time()
start_time = time.strftime('%X %x')
print("\nStart time: ", start_time)
negativo_list, positivo_list, agent, wall_list, h_max, v_max = environment_conf(s_env)
env_dim = [h_max, v_max]
# load file for transfer learning
if s_load == True:
try:
model, op_conf = load_model(s_alg, __location__ + s_load_path)
except Exception as e:
print("DID NOT FIND THE FILE", __location__ + s_load_path, str(e))
else:
model, op_conf = create_model(s_alg, env_dim, net_conf)
# region INITIALIZE VARIABLES 1
percent_list = []
score = 0
score_list = []
episodes = 0
episodes_list = []
steps = 0
steps_list = []
batch_loss = 0
loss_list = []
# endregion
# main component to an episode
while (episodes < s_episode): # max_episodes
negativo_list, positivo_list, agent, wall_list, h_max, v_max = environment_conf(s_env)
# region INITIALIZE VARIABLES 2
episodes += 1
episodes_list.append(episodes)
max_steps = 100
steps_list.append(steps)
steps = 0
act_list = []
last_move = False
action_chosen = ""
encountered = 0
pos_collected = 0
prob = s_prob
# endregion
if s_server == False:
# region DRAW SCREEN
screen.fill(white)
show_Alg(s_alg, screen)
show_Samples(sample, screen)
show_Level(episodes, screen)
show_Score(score, screen)
show_Steps(steps, screen)
show_Percent(percent_list[-10:], screen)
show_Steps_list(steps_list[-30:], screen)
show_Act_List(act_list[-20:], screen)
show_Action(action_chosen, screen)
show_Env(s_env, screen)
draw_objects(agent, positivo_list, negativo_list, wall_list, screen)
pygame.display.flip()
# endregion
# main reinforcement learning part
while (True): # max_steps or condition to finish
sleep(speed)
''' EVENT HANDLE '''
key_pressed = False
set_action = False
while (s_server == False):
for event in pygame.event.get():
# QUIT GAME
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# ADD OR DELETE WALL
if event.type == pygame.MOUSEBUTTONDOWN:
pass
# if (pygame.mouse.get_pressed() == (1, 0, 0)): # LEFT BUTTON (add wall)
# pos = pygame.mouse.get_pos()
# x = (pos[0] - x_g) / (m + w)
# y = (pos[1] - y_g) / (m + h)
# x = math.trunc(x)
# y = math.trunc(y)
# w_has = False
# for item in wall_list:
# if math.trunc((item[0] - x_g) / (m + w)) == x and math.trunc(
# (item[1] - y_g) / (m + h)) == y:
# w_has = True
# if w_has == False:
# wall = Class.Wall('wall', x, y)
# print('wall ', wall, 'added')
# wall_list.append(wall)
# if (pygame.mouse.get_pressed() == (0, 0, 1)): # RIGHTBUTTON (delete wall)
# pos = pygame.mouse.get_pos()
# x = (pos[0] - x_g) / (m + w)
# y = (pos[1] - y_g) / (m + h)
# x = math.trunc(x)
# y = math.trunc(y)
# wall = Class.Wall('wall', x, y)
# for i in wall_list:
# if i == wall:
# wall_list.remove(wall)
# print('wall ', wall, 'removed')
# EVENT - ANY PRESSED KEY
# PRESS A KEY
if event.type == pygame.KEYDOWN:
# SAVE AND QUIT - KEY P
if event.key == pygame.K_p:
pygame.quit()
sys.exit()
# PLOT AGENT`S PERFORMENCE - KEY G
if event.key == pygame.K_g:
plt.plot(score_list)
plt.ylabel('Score')
plt.xlabel('Total Steps')
plt.title('Performance of the Agent')
plt.show()
plt.plot(percent_list)
plt.ylabel('Percentage of objects +')
plt.xlabel('Total Steps')
plt.title('Episode over 100 times step each')
plt.show()
if s_alg == "DQN":
plt.plot(loss_list)
plt.ylabel('loss')
plt.xlabel('Total Steps')
plt.title('batch_loss')
plt.show()
# MOVE - SPACE BAR
if event.key == pygame.K_SPACE:
key_pressed = True
break
# MOVE - ARROW KEYS
if event.key in p_keys:
key_pressed = True
set_action = True
if event.key == pygame.K_w: # North # add_act('↑') ⇦ ⇨ ⇧ ⇩
key_action = "up"
if event.key == pygame.K_s: # South # add_act('↓') ⬅ ➡ ⬆ ⬇
key_action = "down"
if event.key == pygame.K_d: # West # add_act('→')
key_action = "right"
if event.key == pygame.K_a: # East # add_act('←')
key_action = "left"
break
# Run the game if a key was pressed or automatic mode is selected
if key_pressed or s_auto:
break
# BREAK IF IT WAS THE LAST MOVE
if last_move == True:
break
# RUN_GAME
steps += 1
''' OLD STATE - S 1 - 1'''
state_t = update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list)
agent_t = agent.pos
''' CHOOSE ACTION - AGENT ACT - 2'''
action_chosen, model, print_action = choose_action(s_alg, state_t, agent_t, model, prob, steps)
if set_action: action_chosen = key_action
''' CHANGE THE WORLD - UP_ENV - 3'''
agent.try_move(action_chosen, wall_list)
act_list.append(action_chosen)
# if s_print: print(print_action, action_chosen)
''' NEW STATE - S2 - 4'''
state_t1 = update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list)
agent_t1 = agent.pos
global explore_set
global explore_dict
if s_print:
# print('\n>>>> Level: ' + str(episodes) + ' | Step: ' + str(
# steps) + ' | New_agent_pos: ' + str(agent.pos) + ' <<<<')
pos_tuple = tuple(agent.pos)
explore_set.add(pos_tuple)
if pos_tuple not in explore_dict:
explore_dict[pos_tuple] = 1
else:
explore_dict[pos_tuple] += 1
if steps == max_steps:
    print("Number of explored nodes: " + str(len(explore_set)))
    print("Explored node positions: " + str(explore_dict))
explore_set = set()
explore_dict = dict()
''' GET REWARD - 5 '''
# region GET REWARD AND DELETE COLLECTED OBJECT
prev_score = score
score += step_reward
for positivo in positivo_list:
if agent.pos == positivo.pos:
encountered += 1
pos_collected += 1
score += positive_reward
positivo = Class.Positivo('positivo', agent.pos[0], agent.pos[1])
positivo_list.remove(positivo)
# if s_print == True and s_server == False:
# print(' Hit the Positivo')
for negativo in negativo_list:
if agent.pos == negativo.pos:
encountered += 1
score -= negative_reward
negativo = Class.Negativo('negativo', agent.pos[0], agent.pos[1])
negativo_list.remove(negativo)
# if s_print == True and s_server == False:
# print(' Hit the Negativo')
new_score = score
score_list.append(score)
reward = new_score - prev_score
# endregion
''' LEARN - 6 '''
# CONDITION TO FINISH THE EPISODE (the max_steps budget always applies)
if steps > max_steps:
    last_move = True
elif s_cond_to_end == 'max_steps':
    if steps == max_steps:
        last_move = True
elif s_cond_to_end == 'coll_all':
    if len(positivo_list) == 0 and len(negativo_list) == 0:
        last_move = True
elif s_cond_to_end == 'only_positive':
    if len(positivo_list) == 0:
        last_move = True
elif s_cond_to_end == 'only_negative':
    if len(negativo_list) == 0:
        last_move = True
# LEARN
if s_learn == True:
action_t = action_chosen
if last_move == False:
''' LEARN '''
model, batch_loss, exp_replay = learn(s_alg, model, state_t, state_t1, agent_t, agent_t1, reward, action_t, False, net_conf, exp_replay)
else:
''' LEARN FINAL '''
model, batch_loss, exp_replay = learn(s_alg, model, state_t, state_t1, agent_t, agent_t1, reward, action_t, True, net_conf, exp_replay)
if s_server == False:
# region DRAW SCREEN
screen.fill(white)
show_Alg(s_alg, screen)
show_Samples(sample, screen)
show_Level(episodes, screen)
show_Score(score, screen)
show_Steps(steps, screen)
show_Percent(percent_list[-10:], screen)
show_Steps_list(steps_list[-30:], screen)
show_Act_List(act_list[-20:], screen)
show_Action(action_chosen, screen)
show_Env(s_env, screen)
draw_objects(agent, positivo_list, negativo_list, wall_list, screen)
pygame.display.flip()
# endregion
try:
percent = pos_collected / encountered
except ZeroDivisionError:
percent = 0
percent_list.append(percent)
loss_list.append(batch_loss)
print("Episode: ", episodes)
# region TIME 1
print("Start time: ", start_time)
end = time.time()
end_time = time.strftime('%X %x')
print("End time: ", end_time)
time_elapsed = end - start
print("Time elapsed: ", time_elapsed)
# endregion
'''GET THE BEST MODEL'''
if max(score_list) > max(score_list_best):
best_model = model
score_list_best = score_list
# region MAKE LIST OF THE RESULTS
avg_last_score_list.append(score_list[-1])
score_list_df = pd.DataFrame({'Score': score_list})
percent_list_df = pd.DataFrame({'Percent': percent_list})
loss_list_df = pd.DataFrame({'Batch_loss': loss_list})
time_sample_df = pd.DataFrame({'Time': [time_elapsed]})
df_score = pd.concat([df_score, score_list_df], ignore_index=True, axis=1)
df_percent_list = pd.concat([df_percent_list, percent_list_df], ignore_index=True, axis=1)
df_loss_list = pd.concat([df_loss_list, loss_list_df], ignore_index=True, axis=1)
df_time_sample = pd.concat([df_time_sample, time_sample_df], ignore_index=True, axis=1)
# endregion
if s_save == True:
# region PATH TO SAVE
save_path_core = __location__ + "/Results/"
if s_learn == True: save_path = save_path_core + "Train/Env_" + str(s_env) + "/Train_Env_" + str(s_env) + "_" + s_alg
else: save_path = save_path_core + "Test/Env_" + str(s_env) + "/Test_Env_" + str(s_env) + "_" + s_alg
if s_alg == "DQN": save_path += "_" + str(s_net_comb_param)
# Format begin_time so it is safe to use in a file name
time_path = begin_time.replace(":", " ").replace("/", "-")
# append to the save path
save_path = save_path + " " + time_path
if s_load == True:
load_path = " loaded_with " + s_load_path.replace("/", "_")
save_path = save_path + load_path
# If the save path does not exist yet, create it
if not os.path.exists(os.path.dirname(save_path)):
try:
os.makedirs(os.path.dirname(save_path))
except OSError as exc: # Guard against race condition
    print("ERROR when creating the save directory:", exc)
# endregion
print("save_path: ", save_path)
# region SAVE ALL
# IF IT IS NOT DQN, ZERO OUT THE NET CONF. VALUES
if s_alg != "DQN":
op_conf = [0, 0, 0, 0, 0, 0]
net_conf = {"N_actions":0, "Max_memory":0, "Hidden_size":0, "Batch_size":0, "Optimizer":"none"}
avg_last_score = np.average(avg_last_score_list)
config_list = pd.concat([pd.Series({'Run_Conf': "A"}),
pd.Series({'Env_conf': s_env}),
pd.Series({'Algort': s_alg}),
pd.Series({'Learn': s_learn}),
pd.Series({'Load': s_load}),
pd.Series({'Samples': s_sample}),
pd.Series({'Episode': s_episode}),
pd.Series({'Max_steps': max_steps}),
pd.Series({'s_cond_to_end': s_cond_to_end}),
pd.Series({'Auto': s_auto}),
pd.Series({'Server': s_server}),
pd.Series({'Print': s_print}),
pd.Series({'MODEL CONF': ""}),
pd.Series({'alfa': alfa}),
pd.Series({'gamma': gamma}),
pd.Series({'Prob': s_prob}),
pd.Series({'N_actions': net_conf["N_actions"]}),
pd.Series({'Max_memory': net_conf["Max_memory"]}),
pd.Series({'Hidden_size': net_conf["Hidden_size"]}),
pd.Series({'Batch_size': net_conf["Batch_size"]}),
pd.Series({'Optimizer': net_conf["Optimizer"]}),
pd.Series({'lr': op_conf[0]}),
pd.Series({'beta_1': op_conf[1]}),
pd.Series({'beta_2': op_conf[2]}),
pd.Series({'epsilon': op_conf[3]}),
pd.Series({'decay': op_conf[4]}),
pd.Series({'rho': op_conf[5]}),
pd.Series({'': ""}),
pd.Series({'AVG SCORE': avg_last_score})])
config_list = config_list.to_frame()
if s_print: print("\nconfig_list:\n", config_list)
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(save_path + ".xlsx", engine='xlsxwriter')
# SAVING CONFIG:
config_list.to_excel(writer, sheet_name='Run_Conf', header=False)
worksheet = writer.sheets['Run_Conf']
worksheet.set_column('A:B', 15)
# SAVING SCORE:
df_score_mean = df_score.mean(axis=1)
df_score.insert(0, "Avg " + str(s_sample), df_score_mean)
df_score.to_excel(writer, sheet_name='Score')
worksheet = writer.sheets['Score']
worksheet.write(0, 0, "Score")
# SAVING PERCENT:
df_percent_list_mean = df_percent_list.mean(axis=1)
df_percent_list.insert(0, "Avg " + str(s_sample), df_percent_list_mean)
df_percent_list.to_excel(writer, sheet_name='Percent')
worksheet = writer.sheets['Percent']
worksheet.write(0, 0, "Percent")
# SAVING LOSS:
df_loss_list.to_excel(writer, sheet_name='Loss')
worksheet = writer.sheets['Loss']
worksheet.write(0, 0, "Loss")
# SAVING TIME:
df_time_sample.to_excel(writer, sheet_name='Time')
worksheet = writer.sheets['Time']
worksheet.write(0, 0, "Time")
# region CELL SIZE
# worksheet = writer.sheets['Score']
# worksheet.set_column('A:B', 15)
# worksheet = writer.sheets['Time']
# worksheet.set_column('A:B', 15)
# endregion
# SAVING BEST MODEL (out of # Samples):
if s_alg == "DSRL" or s_alg == "QL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
# SAVING MODEL CONFIGURATIONS:
best_model.to_excel(writer, sheet_name='model')
# CONDITIONAL COLOR
worksheet = writer.sheets['model']
for x in range(2, 700, 4):
cell = "C" + str(x) + ":D" + str(x + 3)
worksheet.conditional_format(cell, {'type': '3_color_scale'})
# CELL SIZE
worksheet = writer.sheets['model']
worksheet.set_column('A:A', 50)
# region ADD PLOTS
# worksheet = writer.sheets['results']
# workbook = writer.book
# chart = workbook.add_chart({'type': 'line'})
# chart2 = workbook.add_chart({'type': 'line'})
# chart.add_series({'values': '=results!$B$2:$B$100'})
# chart2.add_series({'values': '=results!$C$2:$C$10'})
# worksheet.insert_chart('F3', chart)
# worksheet.insert_chart('N3', chart2)
# SAVE DQN MODEL
if s_learn == True and s_alg == "DQN":
save_model(best_model, save_path)
writer.save()
# endregion
print("\n - END - "
"\n sample: %s"
"\n s_env: %s"
"\n s_alg: %s"
"\n s_episode: %s"
"\n s_learn: %s"
"\n s_load: %s"
"\n s_print: %s"
"\n s_auto: %s"
"\n s_cond_to_end: %s"
"\n s_server: %s"
"\n s_net_comb_param: %s"
"\n s_prob: %s" % experiment_configurations)
# region TIME 2
print("\n\nBegin time: ", begin_time)
finish = time.time()
finish_time = time.strftime('%X %x')
print("Final time: ", finish_time)
total_time = finish - begin
print("Total time: ", total_time)
# endregion
return
# -------------------------------------------------------------------------------------------------- #
''' SELECT PARAMETERS TO RUN THE SOFTWARE '''
# environment configuration
Env = 11
Alg_list = ["QL",
"DSRL",
"DSRL_object_near",
"DQN",
"DSRL_dist",
"DSRL_dist_type",
"DSRL_dist_type_near",
"DSRL_dist_type_near_propNeg",
"DSRL_object"]
Alg = Alg_list[2] # Select the algorithm to be used
Learn = False # To update its knowledge
Load = True # To load a learned model
Load_path = "/Results/Train/Env_11/Train_Env_11_DSRL 02 41 20 05-05-21"
# algorithm configuration
Samples = 2 # Usually 10 samples (repeat the run of Episodes 10 times)
Print = True # Print some info in the terminal
Auto = True # Agent moves automatically; if False it moves when the spacebar is pressed
Server = False # Set True when running on a server (disables the pygame display)
# change Prob to 1 for probe training??
Prob = 0.3 # Probability to make a random move (exploration rate)
Cond_to_end = "max_steps" # Choose from below (there are 4)
Save = False # Save the model
speed = 0.05 # seconds per frame
# Cond_to_end = "max_steps"
# Cond_to_end = "coll_all"
# Cond_to_end = "only_negative"
Episodes = 500 # Usually 1000 or 100
# region DQN Model Configurations:
# max_memory_list = [5, 5, 5, 30, 30, 30, 100, 100, 100]
# hidden_size_list = [5, 30, 270, 5, 30, 270, 5, 30, 270]
# batch_size_list = [1, 1, 1, 10, 10, 10, 32, 32, 32]
max_memory_list = [100, 100, 100, 300, 300, 300, 900, 900, 900]
hidden_size_list = [5, 10, 15, 5, 10, 15, 5, 10, 15]
batch_size_list = [32, 32, 32, 32, 32, 32, 32, 32, 32]
optimizer_list = ["adam", "rms_opt"]
n_actions = 4 # [move_up, move_down, move_left, move_right]
# endregion
Net_comb_param = 4
# ------------------------------------------------------------------------------------------- #
run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
# ------------------------------------------------------------------------------------------- #
''' REPEAT DQN Net_Comb_Param '''
# for i in range(9):
# Net_comb_param = i
# run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
''' REPEAT Alg for a list of Env '''
# env_list = [2,3]
# for Env in env_list:
# run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
''' Alg_list for Env_list '''
# env_list = [2,3]
# alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN"]
# for Env in env_list:
# for Alg in alg_list:
# run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO in JAX.
Notation:
B, scalar - batch size
T, scalar - number of time-steps in a trajectory, or the value of the padded
time-step dimension.
OBS, tuple - shape of a singular observation from the environment.
Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)
A, scalar - Number of actions, assuming a discrete space.
Policy and Value function signatures:
Policy Function :: [B, T] + OBS -> [B, T, A]
Value Function :: [B, T] + OBS -> [B, T, 1]
Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])
i.e. the policy net should take a batch of *trajectories* and at each time-step
in each batch deliver a probability distribution over actions.
NOTE: It doesn't return logits, rather the expectation is that it returns
log-probabilities instead.
NOTE: The policy and value functions need to take care to not take into account
future time-steps while deciding the actions (or value) for the current
time-step.
Policy and Value Function produces a tuple of the expected output of a policy
function and a value function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
import time
from absl import logging
import gym
from jax import grad
from jax import jit
from jax import lax
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.trax import jaxboard
from tensor2tensor.trax import layers
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensorflow.io import gfile
DEBUG_LOGGING = False
GAMMA = 0.99
LAMBDA = 0.95
EPSILON = 0.1
EPOCHS = 50 # 100
NUM_OPTIMIZER_STEPS = 100
PRINT_EVERY_OPTIMIZER_STEP = 20
BATCH_TRAJECTORIES = 32
def policy_and_value_net(rng_key,
batch_observations_shape,
num_actions,
bottom_layers_fn=None,
two_towers=True):
"""A policy and value net function."""
# Layers.
# Now, with the current logits, one head computes action probabilities and the
# other computes the value function.
# NOTE: We use LogSoftmax instead of Softmax because of numerical stability.
net = None
if not two_towers:
tower = [] if bottom_layers_fn is None else bottom_layers_fn()
tower.extend([
layers.Branch(
layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
layers.Dense(1))
])
net = layers.Serial(*tower)
else:
tower1 = [] if bottom_layers_fn is None else bottom_layers_fn()
tower2 = [] if bottom_layers_fn is None else bottom_layers_fn()
tower1.extend([layers.Dense(num_actions), layers.LogSoftmax()])
tower2.extend([layers.Dense(1)])
net = layers.Branch(
layers.Serial(*tower1),
layers.Serial(*tower2),
)
assert net
return net.initialize(batch_observations_shape, rng_key), net
def optimizer_fun(net_params, step_size=1e-3):
opt = trax_opt.Adam(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08)
opt_init = lambda x: (x, opt.tree_init(x))
opt_update = lambda i, g, s: opt.tree_update(i, g, s[0], s[1])
get_params = lambda x: x[0]
opt_state = opt_init(net_params)
return opt_state, opt_update, get_params
# Should this be collect 'n' trajectories, or
# Run the env for 'n' steps and take completed trajectories, or
# Any other option?
# TODO(afrozm): Replace this with EnvProblem?
def collect_trajectories(env,
policy_fun,
num_trajectories=1,
policy=env_problem_utils.CATEGORICAL_SAMPLING,
max_timestep=None,
boundary=20,
epsilon=0.1,
reset=True,
rng=None):
"""Collect trajectories with the given policy net and behaviour.
Args:
env: A gym env interface, for now this is not-batched.
policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable.
num_trajectories: int, number of trajectories.
policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e.
how to use the policy_fun to return an action.
max_timestep: int or None, the index of the maximum time-step at which we
return the trajectory, None for ending a trajectory only when env returns
done.
boundary: int, boundary for padding, used in EnvProblem envs.
epsilon: float, the epsilon for `epsilon-greedy` policy.
reset: bool, true if we want to reset the envs. The envs are also reset if
max_timestep is None or < 0.
rng: jax rng, splittable.
Returns:
A tuple (trajectory, number of trajectories that are done)
trajectory: list of (observation, action, reward) tuples, where each element
`i` is a tuple of numpy arrays with shapes as follows:
observation[i] = (B, T_i + 1)
action[i] = (B, T_i)
reward[i] = (B, T_i)
"""
assert isinstance(env, env_problem.EnvProblem)
# This is an env_problem, run its collect function.
return env_problem_utils.play_env_problem_with_policy(
env,
policy_fun,
num_trajectories=num_trajectories,
max_timestep=max_timestep,
boundary=boundary,
policy_sampling=policy,
eps=epsilon,
reset=reset,
rng=rng)
# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
"""Returns the padding value given a dtype."""
padding_value = None
if dtype == np.uint8:
padding_value = np.uint8(0)
elif dtype == np.uint16:
padding_value = np.uint16(0)
elif dtype == np.float32 or dtype == np.float64:
padding_value = 0.0
else:
padding_value = 0
assert padding_value is not None
return padding_value
# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
"""Pad trajectories to a bucket length that is a multiple of boundary.
Args:
trajectories: list[(observation, actions, rewards)], where each observation
is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the
length of the list being B (batch size).
boundary: int, bucket length, the actions and rewards are padded to integer
multiples of boundary.
Returns:
tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
padded_rewards) where padded_observations is shaped (B, T+1) + OBS and
padded_actions, padded_rewards & reward_mask are shaped (B, T).
Where T is max(t) rounded up to an integer multiple of boundary.
padded_length is how much padding we've added and
reward_mask is 1s for actual rewards and 0s for the padding.
"""
# Let's compute max(t) over all trajectories.
t_max = max(r.shape[0] for (_, _, r) in trajectories)
# t_max is rounded to the next multiple of `boundary`
boundary = int(boundary)
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
# So all obs will be padded to bucket_length + 1 and actions and rewards to bucket_length.
padded_observations = []
padded_actions = []
padded_rewards = []
padded_lengths = []
reward_masks = []
for (o, a, r) in trajectories:
# Determine the amount to pad, this holds true for obs, actions and rewards.
num_to_pad = bucket_length + 1 - o.shape[0]
padded_lengths.append(num_to_pad)
if num_to_pad == 0:
padded_observations.append(o)
padded_actions.append(a)
padded_rewards.append(r)
reward_masks.append(onp.ones_like(r, dtype=np.int32))
continue
# First pad observations.
padding_config = [(0, num_to_pad, 0)]
for _ in range(o.ndim - 1):
padding_config.append((0, 0, 0))
padding_config = tuple(padding_config)
padding_value = get_padding_value(o.dtype)
action_padding_value = get_padding_value(a.dtype)
reward_padding_value = get_padding_value(r.dtype)
padded_obs = lax.pad(o, padding_value, padding_config)
padded_observations.append(padded_obs)
# Now pad actions and rewards.
assert a.ndim == 1 and r.ndim == 1
padding_config = ((0, num_to_pad, 0),)
padded_action = lax.pad(a, action_padding_value, padding_config)
padded_actions.append(padded_action)
padded_reward = lax.pad(r, reward_padding_value, padding_config)
padded_rewards.append(padded_reward)
# Also create the mask to use later.
reward_mask = onp.ones_like(r, dtype=np.int32)
reward_masks.append(lax.pad(reward_mask, 0, padding_config))
return padded_lengths, np.stack(reward_masks), np.stack(
padded_observations), np.stack(padded_actions), np.stack(padded_rewards)
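# Worked example (illustrative): with boundary=20 and a longest trajectory of
# t_max=25 steps, bucket_length = 20 * ceil(25 / 20) = 40, so observations are
# padded to length 41 and actions/rewards/masks to length 40.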
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def rewards_to_go(rewards, mask, gamma=0.99):
r"""Computes rewards to go.
Reward-to-go is defined as the discounted reward that we have yet to
collect, going forward from this point, i.e.:
r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
Args:
rewards: np.ndarray of shape (B, T) of rewards.
mask: np.ndarray of shape (B, T) of mask for the rewards.
gamma: float, discount factor.
Returns:
rewards to go, np.ndarray of shape (B, T).
"""
B, T = rewards.shape # pylint: disable=invalid-name,unused-variable
masked_rewards = rewards * mask # (B, T)
# We use the following recurrence relation, derived from the equation above:
#
# r2g[t+1] = (r2g[t] - r[t]) / gamma
#
# This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..
#
# **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0
# and gamma < 1.0, so the division keeps increasing.
#
# So we just run the recurrence in reverse, i.e.
#
# r2g[t] = r[t] + (gamma*r2g[t+1])
#
# This is much better, but we might lose precision, since the (small) rewards
# at earlier time-steps get added to a (very) large running sum.
# Compute r2g_{T-1} at the start and then compute backwards in time.
r2gs = [masked_rewards[:, -1]]
# Go from T-2 down to 0.
for t in reversed(range(T - 1)):
r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))
# The list should have length T.
assert T == len(r2gs)
# First we stack them in the correct way to make it (B, T), but these are
# still from newest (T-1) to oldest (0), so then we flip it on time axis.
return np.flip(np.stack(r2gs, axis=1), axis=1)
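# Worked example (illustrative): rewards [[1., 1., 1.]] with an all-ones mask
# and gamma=0.5 yield, running the recurrence backwards,
#   r2g = [[1 + 0.5 * 1.5, 1 + 0.5 * 1, 1]] = [[1.75, 1.5, 1.0]]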
@jit
def value_loss_given_predictions(value_prediction,
rewards,
reward_mask,
gamma=0.99,
epsilon=0.2,
value_prediction_old=None):
"""Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, T+1, 1)
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
epsilon: float, clip-fraction, used if value_prediction_old isn't None
value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions
using the old parameters. If provided, we incorporate this in the loss as
well. This is from the OpenAI baselines implementation.
Returns:
The average L2 value loss, averaged over instances where reward_mask is 1.
"""
B, T = rewards.shape # pylint: disable=invalid-name
assert (B, T) == reward_mask.shape
assert (B, T + 1, 1) == value_prediction.shape
value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1)
value_prediction = value_prediction[:, :-1] * reward_mask # (B, T)
r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T)
loss = (value_prediction - r2g)**2
# From the baselines implementation.
if value_prediction_old is not None:
value_prediction_old = np.squeeze(value_prediction_old, axis=2) # (B, T+1)
value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, T)
v_clipped = value_prediction_old + np.clip(
value_prediction - value_prediction_old, -epsilon, epsilon)
v_clipped_loss = (v_clipped - r2g)**2
loss = np.maximum(v_clipped_loss, loss)
# Take an average on only the points where mask != 0.
return np.sum(loss) / np.sum(reward_mask)
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def deltas(predicted_values, rewards, mask, gamma=0.99):
r"""Computes TD-residuals from V(s) and rewards.
Where a `delta`, i.e. a td-residual is defined as:
delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.
Args:
predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was
squeezed. These represent V(s_bt) for b < B and t < T+1
rewards: ndarray of shape (B, T) of rewards.
mask: ndarray of shape (B, T) of mask for rewards.
gamma: float, discount factor.
Returns:
ndarray of shape (B, T) of one-step TD-residuals.
"""
# `d`s are basically one-step TD residuals.
d = []
_, T = rewards.shape # pylint: disable=invalid-name
for t in range(T):
d.append(rewards[:, t] + (gamma * predicted_values[:, t + 1]) -
predicted_values[:, t])
return np.array(d).T * mask
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
r"""Computes the GAE advantages given the one step TD-residuals.
The formula for a GAE advantage estimator is as follows:
A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).
Internally we just call rewards_to_go, since it is the same computation.
Args:
td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
mask: np.ndarray of shape (B, T) of mask for the residuals. It may be the
case that the `td_deltas` are already masked correctly, since they are
produced by `deltas(...)`.
lambda_: float, lambda parameter for GAE estimators.
gamma: float, gamma (discount) parameter for GAE estimators.
Returns:
GAE advantage estimates.
"""
return rewards_to_go(td_deltas, mask, lambda_ * gamma)
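# Illustrative check (not in the original module): with lambda_ = 1.0, GAE
# reduces to a plain discounted sum of the TD-residuals, i.e. exactly
# rewards_to_go with discount gamma. Call manually; not invoked on import.
def _example_gae_reduces_to_rewards_to_go():
  import numpy as onp
  td = onp.array([[0.5, -0.25, 1.0]])
  mask = onp.ones_like(td)
  a_gae = gae_advantages(td, mask, lambda_=1.0, gamma=0.9)
  a_r2g = rewards_to_go(td, mask, gamma=0.9)
  assert onp.allclose(a_gae, a_r2g)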
def chosen_probabs(probab_observations, actions):
"""Picks out the probabilities of the actions along batch and time-steps.
Args:
probab_observations: ndarray of shape `[B, T+1, A]`, where
probab_observations[b, t, i] contains the log-probability of action = i at
the t^th time-step in the b^th trajectory.
actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which
action was chosen in the b^th trajectory's t^th time-step.
Returns:
`[B, T]` ndarray with the log-probabilities of the chosen actions.
"""
B, T = actions.shape # pylint: disable=invalid-name
assert (B, T + 1) == probab_observations.shape[:2]
return probab_observations[np.arange(B)[:, None], np.arange(T), actions]
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
"""Computes the probability ratios for each time-step in a trajectory.
Args:
p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy
network assigns to all the actions at each time-step in each batch, using
the new parameters.
p_old: ndarray of shape [B, T+1, A], same as above, but using the old policy
network parameters.
actions: ndarray of shape [B, T] where each element is from [0, A).
reward_mask: ndarray of shape [B, T] masking over probabilities.
Returns:
probab_ratios: ndarray of shape [B, T], where
probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}
"""
B, T = actions.shape # pylint: disable=invalid-name
assert (B, T + 1) == p_old.shape[:2]
assert (B, T + 1) == p_new.shape[:2]
logp_old = chosen_probabs(p_old, actions)
logp_new = chosen_probabs(p_new, actions)
assert (B, T) == logp_old.shape
assert (B, T) == logp_new.shape
# Since these are log-probabilities, we just subtract them.
probab_ratios = np.exp(logp_new - logp_old) * reward_mask
assert (B, T) == probab_ratios.shape
return probab_ratios
def clipped_probab_ratios(probab_ratios, epsilon=0.2):
return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
return np.minimum(
probab_ratios * advantages,
clipped_probab_ratios(probab_ratios, epsilon=epsilon) *
advantages) * reward_mask
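# Toy illustration of the PPO clipping (a standalone sketch, not in the
# original module): with epsilon=0.2 a ratio of 1.5 on a positive advantage
# is clipped to 1.2, so the objective stops rewarding further probability
# increases; a ratio of 0.5 is left as the (smaller) unclipped term.
def _example_clipped_objective():
  import numpy as onp
  ratios = onp.array([[1.5, 0.5]])
  advantages = onp.array([[1.0, 1.0]])
  mask = onp.ones_like(ratios)
  obj = clipped_objective(ratios, advantages, mask, epsilon=0.2)
  # min(1.5, clip(1.5)=1.2) = 1.2 and min(0.5, clip(0.5)=0.8) = 0.5.
  assert onp.allclose(obj, onp.array([[1.2, 0.5]]))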
@jit
def ppo_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
value_predictions_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2):
"""PPO objective, with an eventual minus sign, given predictions."""
B, T = padded_rewards.shape # pylint: disable=invalid-name
assert (B, T) == padded_actions.shape
assert (B, T) == reward_mask.shape
_, _, A = log_probab_actions_old.shape # pylint: disable=invalid-name
assert (B, T + 1, 1) == value_predictions_old.shape
assert (B, T + 1, A) == log_probab_actions_old.shape
assert (B, T + 1, A) == log_probab_actions_new.shape
# (B, T)
td_deltas = deltas(
np.squeeze(value_predictions_old, axis=2), # (B, T+1)
padded_rewards,
reward_mask,
gamma=gamma)
# (B, T)
advantages = gae_advantages(
td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)
# Normalize the advantages.
advantages = (advantages - np.mean(advantages)) / np.std(advantages)
# (B, T)
ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,
padded_actions, reward_mask)
assert (B, T) == ratios.shape
# (B, T)
objective = clipped_objective(
ratios, advantages, reward_mask, epsilon=epsilon)
assert (B, T) == objective.shape
# ()
average_objective = np.sum(objective) / np.sum(reward_mask)
# Loss is negative objective.
return -average_objective
@jit
def combined_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
value_prediction_new,
value_prediction_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2,
c1=1.0,
c2=0.01):
"""Computes the combined (clipped loss + value loss) given predictions."""
loss_value = value_loss_given_predictions(
value_prediction_new,
padded_rewards,
reward_mask,
gamma=gamma,
value_prediction_old=value_prediction_old,
epsilon=epsilon)
loss_ppo = ppo_loss_given_predictions(
log_probab_actions_new,
log_probab_actions_old,
value_prediction_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon)
entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)
return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,
loss_value, entropy_bonus)
@functools.partial(jit, static_argnums=(3,))
def combined_loss(new_params,
log_probab_actions_old,
value_predictions_old,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2,
c1=1.0,
c2=0.01,
rng=None):
"""Computes the combined (clipped loss + value loss) given observations."""
log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(
padded_observations, new_params, rng=rng)
# (combined_loss, ppo_loss, value_loss, entropy_bonus)
return combined_loss_given_predictions(
log_probab_actions_new,
log_probab_actions_old,
value_predictions_new,
value_predictions_old,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon,
c1=c1,
c2=c2)
@functools.partial(jit, static_argnums=(2, 3, 4))
def policy_and_value_opt_step(i,
opt_state,
opt_update,
get_params,
policy_and_value_net_apply,
log_probab_actions_old,
value_predictions_old,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=1.0,
c2=0.01,
gamma=0.99,
lambda_=0.95,
epsilon=0.1,
rng=None):
"""Policy and Value optimizer step."""
# Combined loss function given the new params.
def policy_and_value_loss(params):
"""Returns the combined loss given just parameters."""
(loss, _, _, _) = combined_loss(
params,
log_probab_actions_old,
value_predictions_old,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=c1,
c2=c2,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon,
rng=rng)
return loss
new_params = get_params(opt_state)
g = grad(policy_and_value_loss)(new_params)
# TODO(afrozm): Maybe clip gradients?
return opt_update(i, g, opt_state)
def get_time(t1, t2=None):
if t2 is None:
t2 = time.time()
return round((t2 - t1) * 1000, 2)
def approximate_kl(log_prob_new, log_prob_old, mask):
"""Computes the approximate KL divergence between the old and new log-probs.
Args:
log_prob_new: (B, T+1, A) log probs new
log_prob_old: (B, T+1, A) log probs old
mask: (B, T)
Returns:
Approximate KL.
"""
diff = log_prob_old - log_prob_new
# Cut the last time-step out.
diff = diff[:, :-1]
# Mask out the irrelevant part.
diff *= mask[:, :, np.newaxis] # make mask (B, T, 1)
# Average on non-masked part.
return np.sum(diff) / np.sum(mask)
def masked_entropy(log_probs, mask):
"""Computes the entropy for the given log-probs.
Args:
log_probs: (B, T+1, A) log probs
mask: (B, T) mask.
Returns:
Entropy.
"""
# Cut the last time-step out.
lp = log_probs[:, :-1]
# Mask out the irrelevant part.
lp *= mask[:, :, np.newaxis] # make mask (B, T, 1)
p = np.exp(lp) * mask[:, :, np.newaxis] # (B, T, 1)
# Average on non-masked part and take negative.
return -(np.sum(lp * p) / np.sum(mask))
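# Quick check (standalone sketch, not in the original module): for a
# uniform distribution over two actions, the masked entropy is ln(2) per
# unmasked time-step.
def _example_masked_entropy():
  import numpy as onp
  log_probs = onp.log(0.5) * onp.ones((1, 2, 2))  # (B, T+1, A) = (1, 2, 2)
  mask = onp.ones((1, 1))                         # (B, T)
  assert onp.isclose(float(masked_entropy(log_probs, mask)), onp.log(2))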
def evaluate_policy(eval_env,
get_predictions,
boundary,
max_timestep=20000,
rng=None):
"""Evaluate the policy."""
avg_rewards = {}
for policy in [
env_problem_utils.CATEGORICAL_SAMPLING, env_problem_utils.GUMBEL_SAMPLING,
env_problem_utils.EPSILON_GREEDY
]:
trajs, _ = env_problem_utils.play_env_problem_with_policy(
eval_env,
get_predictions,
boundary=boundary,
max_timestep=max_timestep,
reset=True,
policy_sampling=policy,
rng=rng)
avg_rewards[policy] = float(sum(
np.sum(traj[2]) for traj in trajs)) / len(trajs)
return avg_rewards
def maybe_restore_params(output_dir, policy_and_value_net_params):
"""Maybe restore the params from the checkpoint dir.
Args:
output_dir: Directory where saved model checkpoints are stored.
policy_and_value_net_params: Default params, returned if a saved model isn't
found.
Returns:
triple (restored (bool), params, iteration (int)) where iteration is the
epoch from which we restored the params; it is 0 if restored is False.
"""
model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
if not model_files:
return False, policy_and_value_net_params, 0
model_file = sorted(model_files)[-1]
model_file_basename = os.path.basename(model_file) # model-??????.pkl
# In Python 3 `filter` returns an iterator, so join the digits before int().
i = int("".join(filter(str.isdigit, model_file_basename)))
with gfile.GFile(model_file, "rb") as f:
policy_and_value_net_params = pickle.load(f)
return True, policy_and_value_net_params, i
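# Filename round-trip example (illustrative, not in the original module):
# checkpoint files encode the epoch as zero-padded digits, so parsing the
# basename recovers the integer index.
def _example_checkpoint_index():
  basename = "model-%06d.pkl" % 123  # -> "model-000123.pkl"
  assert int("".join(filter(str.isdigit, basename))) == 123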
def training_loop(
env=None,
epochs=EPOCHS,
policy_and_value_net_fun=None,
policy_and_value_optimizer_fun=None,
batch_size=BATCH_TRAJECTORIES,
num_optimizer_steps=NUM_OPTIMIZER_STEPS,
print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
target_kl=0.01,
boundary=20,
max_timestep=None,
max_timestep_eval=20000,
random_seed=None,
gamma=GAMMA,
lambda_=LAMBDA,
epsilon=EPSILON,
c1=1.0,
c2=0.01,
output_dir=None,
eval_every_n=1000,
eval_env=None,
done_frac_for_policy_save=0.5,
enable_early_stopping=True,
env_name=None,
):
"""Runs the training loop for PPO, with fixed policy and value nets."""
assert env
assert output_dir
assert env_name
gfile.makedirs(output_dir)
# Create summary writers and history.
train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))
train_sw.text("env_name", env_name)
timing_sw.text("env_name", env_name)
eval_sw.text("env_name", env_name)
jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)
# Batch Observations Shape = [-1, -1] + OBS, because we will eventually call
# the policy and value networks on inputs of shape [B, T] + OBS.
batch_observations_shape = (-1, -1) + env.observation_space.shape
assert isinstance(env.action_space, gym.spaces.Discrete)
num_actions = env.action_space.n
jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
# Initialize the policy and value network.
policy_and_value_net_params, policy_and_value_net_apply = (
policy_and_value_net_fun(key1, batch_observations_shape, num_actions))
# Maybe restore the policy params. If there is nothing to restore, then
# iteration = 0 and policy_and_value_net_params are returned as is.
restore, policy_and_value_net_params, iteration = (
maybe_restore_params(output_dir, policy_and_value_net_params))
if restore:
logging.info("Restored parameters from iteration [%d]", iteration)
# We should start from the next iteration.
iteration += 1
policy_and_value_net_apply = jit(policy_and_value_net_apply)
# Initialize the optimizers.
policy_and_value_optimizer = (
policy_and_value_optimizer_fun(policy_and_value_net_params))
(policy_and_value_opt_state, policy_and_value_opt_update,
policy_and_value_get_params) = policy_and_value_optimizer
num_trajectories_done = 0
last_saved_at = 0
logging.info("Starting the PPO training loop.")
for i in range(iteration, epochs):
epoch_start_time = time.time()
# Params we'll use to collect the trajectories.
policy_and_value_net_params = policy_and_value_get_params(
policy_and_value_opt_state)
# A function to get the policy and value predictions.
def get_predictions(observations, rng=None):
"""Returns log-probs, value predictions and key back."""
key, key1 = jax_random.split(rng, num=2)
log_probs, value_preds = policy_and_value_net_apply(
observations, policy_and_value_net_params, rng=key1)
return log_probs, value_preds, key
# Evaluate the policy.
policy_eval_start_time = time.time()
if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
jax_rng_key, key = jax_random.split(jax_rng_key, num=2)
logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)
avg_reward = evaluate_policy(
eval_env,
get_predictions,
boundary,
max_timestep=max_timestep_eval,
rng=key)
for k, v in avg_reward.items():
eval_sw.scalar("eval/mean_reward/%s" % k, v, step=i)
logging.info("Epoch [% 6d] Policy Evaluation [%s] = %10.2f", i, k, v)
policy_eval_time = get_time(policy_eval_start_time)
trajectory_collection_start_time = time.time()
logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
jax_rng_key, key = jax_random.split(jax_rng_key)
trajs, num_done = collect_trajectories(
env,
policy_fun=get_predictions,
num_trajectories=batch_size,
max_timestep=max_timestep,
boundary=boundary,
rng=key,
reset=(i == 0) or restore,
epsilon=(10.0 / (i + 10.0))) # this is a different epsilon.
trajectory_collection_time = get_time(trajectory_collection_start_time)
logging.vlog(1, "Collecting trajectories took %0.2f msec.",
trajectory_collection_time)
avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
max_reward = max(np.sum(traj[2]) for traj in trajs)
min_reward = min(np.sum(traj[2]) for traj in trajs)
train_sw.scalar("train/mean_reward", avg_reward, step=i)
logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
avg_reward, max_reward, min_reward,
[float(np.sum(traj[2])) for traj in trajs])
logging.vlog(1,
"Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
max(len(traj[0]) for traj in trajs),
min(len(traj[0]) for traj in trajs))
logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])
padding_start_time = time.time()
(_, reward_mask, padded_observations, padded_actions,
padded_rewards) = pad_trajectories(
trajs, boundary=boundary)
padding_time = get_time(padding_start_time)
logging.vlog(1, "Padding trajectories took %0.2f msec.",
get_time(padding_start_time))
logging.vlog(1, "Padded Observations' shape [%s]",
str(padded_observations.shape))
logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))
# Calculate log-probabilities and value predictions of the trajectories.
# We'll pass these to the loss functions so as to not get recomputed.
# NOTE:
# There is a slight problem here, if the policy network contains
# stochasticity in the log-probabilities (ex: dropout), then calculating
# these again here is not going to be correct and should be done in the
# collect function.
log_prob_recompute_start_time = time.time()
jax_rng_key, key = jax_random.split(jax_rng_key)
log_probabs_traj, value_predictions_traj, _ = get_predictions(
padded_observations, rng=key)
log_prob_recompute_time = get_time(log_prob_recompute_start_time)
# Some assertions.
B, T = padded_actions.shape # pylint: disable=invalid-name
assert (B, T) == padded_rewards.shape
assert (B, T) == reward_mask.shape
assert (B, T + 1) == padded_observations.shape[:2]
assert (B, T + 1) + env.observation_space.shape == padded_observations.shape
# Linear annealing from 0.1 to 0.0
# epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -
# (i /
# (epochs - 1)))
# Constant epsilon.
epsilon_schedule = epsilon
# Compute value and ppo losses.
jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
logging.vlog(2, "Starting to compute P&V loss.")
loss_compute_start_time = time.time()
cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
combined_loss(
policy_and_value_net_params,
log_probabs_traj,
value_predictions_traj,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon_schedule,
c1=c1,
c2=c2,
rng=key1))
loss_compute_time = get_time(loss_compute_start_time)
logging.vlog(
1,
"Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
get_time(loss_compute_start_time))
jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
logging.vlog(1, "Policy and Value Optimization")
optimization_start_time = time.time()
keys = jax_random.split(key1, num=num_optimizer_steps)
for j in range(num_optimizer_steps):
k1, k2, k3 = jax_random.split(keys[j], num=3)
t = time.time()
# Update the optimizer state.
policy_and_value_opt_state = policy_and_value_opt_step(
j,
policy_and_value_opt_state,
policy_and_value_opt_update,
policy_and_value_get_params,
policy_and_value_net_apply,
log_probabs_traj,
value_predictions_traj,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=c1,
c2=c2,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon_schedule,
rng=k1)
# Compute the approx KL for early stopping.
new_policy_and_value_net_params = policy_and_value_get_params(
policy_and_value_opt_state)
log_probab_actions_new, _ = policy_and_value_net_apply(
padded_observations, new_policy_and_value_net_params, rng=k2)
approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
reward_mask)
early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
if early_stopping:
logging.vlog(
1, "Early stopping policy and value optimization at iter: %d, "
"with approx_kl: %0.2f", j, approx_kl)
# We don't return right-away, we want the below to execute on the last
# iteration.
t2 = time.time()
if (((j + 1) % print_every_optimizer_steps == 0) or
(j == num_optimizer_steps - 1) or early_stopping):
# Compute and log the loss.
(loss_combined, loss_ppo, loss_value, entropy_bonus) = (
combined_loss(
new_policy_and_value_net_params,
log_probabs_traj,
value_predictions_traj,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon_schedule,
c1=c1,
c2=c2,
rng=k3))
logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
get_time(t, t2))
logging.vlog(
1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
" [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
loss_value, loss_ppo, entropy_bonus)
if early_stopping:
break
optimization_time = get_time(optimization_start_time)
logging.vlog(
1, "Total Combined Loss reduction [%0.2f]%%",
(100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))
# Save parameters every time we see the end of at least a fraction of batch
# number of trajectories that are done (not completed -- completed includes
# truncated and done).
# Also don't save too frequently, enforce a minimum gap.
# Or if this is the last iteration.
policy_save_start_time = time.time()
num_trajectories_done += num_done
if (((num_trajectories_done >= done_frac_for_policy_save * batch_size)
and (i - last_saved_at > eval_every_n)) or (i == epochs - 1)):
logging.vlog(1, "Epoch [% 6d] saving model.", i)
params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
with gfile.GFile(params_file, "wb") as f:
pickle.dump(policy_and_value_net_params, f)
# Reset this number.
num_trajectories_done = 0
last_saved_at = i
policy_save_time = get_time(policy_save_start_time)
epoch_time = get_time(epoch_start_time)
logging.info(
"Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
" Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
entropy_bonus)
timing_dict = {
"epoch": epoch_time,
"policy_eval": policy_eval_time,
"trajectory_collection": trajectory_collection_time,
"padding": padding_time,
"log_prob_recompute": log_prob_recompute_time,
"loss_compute": loss_compute_time,
"optimization": optimization_time,
"policy_save": policy_save_time,
}
for k, v in timing_dict.items():
timing_sw.scalar("timing/%s" % k, v, step=i)
max_key_len = max(len(k) for k in timing_dict)
timing_info_list = [
"%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
for k, v in sorted(timing_dict.items())
]
logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))
# Reset restore.
restore = False
# Flush summary writers once in a while.
if (i+1) % 1000 == 0 or i == epochs - 1:
train_sw.flush()
timing_sw.flush()
eval_sw.flush()
hexsha: bce5b76758741bd43e051c43114fa45c1ec64384 | path: models/cal.py | repo: SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring @ 059f0063c1493c19b4f45fa27d13adaeb6b2b2d7 | lang: Python (py) | size: 9,421 bytes | licenses: ["MIT"] | stars/issues/forks: null/null/null
"""
WS-DAN models
Hu et al.,
"See Better Before Looking Closer: Weakly Supervised Data Augmentation Network for Fine-Grained Visual Classification",
arXiv:1901.09891
"""
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.resnet as resnet
from models.inception import inception_v3, BasicConv2d
import models.coatnet as coatnet
import random
__all__ = ['WSDAN_CAL']
EPSILON = 1e-6
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
# Bilinear Attention Pooling
class BAP(nn.Module):
def __init__(self, pool='GAP'):
super(BAP, self).__init__()
assert pool in ['GAP', 'GMP']
if pool == 'GAP':
self.pool = None
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, features, attentions):
B, C, H, W = features.size()
_, M, AH, AW = attentions.size()
# match size
if AH != H or AW != W:
attentions = F.interpolate(attentions, size=(H, W), mode='bilinear', align_corners=True)  # upsample_bilinear is deprecated
# feature_matrix: (B, M, C) -> (B, M * C)
if self.pool is None:
feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)
else:
feature_matrix = []
for i in range(M):
AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)
feature_matrix.append(AiF)
feature_matrix = torch.cat(feature_matrix, dim=1)
# sign-sqrt
feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)
# l2 normalization along dimension M and C
feature_matrix = F.normalize(feature_matrix_raw, dim=-1)
if self.training:
fake_att = torch.zeros_like(attentions).uniform_(0, 2)
else:
fake_att = torch.ones_like(attentions)
counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)
counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)
counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)
return feature_matrix, counterfactual_feature
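# Shape sketch for the bilinear pooling einsum (standalone, illustrative,
# not part of the original module): 'imjk,injk->imn' contracts the two
# spatial dims, yielding one C-dim descriptor per attention map, i.e. shape
# (B, M, C) before the .view(B, -1) above.
def _example_bap_einsum():
    feats = torch.randn(2, 8, 7, 7)  # (B, C, H, W)
    atts = torch.rand(2, 4, 7, 7)    # (B, M, H, W)
    fm = torch.einsum('imjk,injk->imn', (atts, feats)) / float(7 * 7)
    assert fm.shape == (2, 4, 8)     # (B, M, C)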
def batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1):
batches, _, imgH, imgW = images.size()
if mode == 'crop':
crop_images = []
for batch_index in range(batches):
atten_map = attention_map[batch_index:batch_index + 1]
if isinstance(theta, tuple):
theta_c = random.uniform(*theta) * atten_map.max()
else:
theta_c = theta * atten_map.max()
crop_mask = F.interpolate(atten_map, size=(imgH, imgW), mode='bilinear', align_corners=True) >= theta_c
nonzero_indices = torch.nonzero(crop_mask[0, 0, ...])
height_min = max(int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH), 0)
height_max = min(int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH), imgH)
width_min = max(int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW), 0)
width_max = min(int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW), imgW)
crop_images.append(
    F.interpolate(images[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max],
                  size=(imgH, imgW), mode='bilinear', align_corners=True))
crop_images = torch.cat(crop_images, dim=0)
return crop_images
elif mode == 'drop':
drop_masks = []
for batch_index in range(batches):
atten_map = attention_map[batch_index:batch_index + 1]
if isinstance(theta, tuple):
theta_d = random.uniform(*theta) * atten_map.max()
else:
theta_d = theta * atten_map.max()
drop_masks.append(F.interpolate(atten_map, size=(imgH, imgW), mode='bilinear', align_corners=True) < theta_d)
drop_masks = torch.cat(drop_masks, dim=0)
drop_images = images * drop_masks.float()
return drop_images
else:
raise ValueError('Expected mode in [\'crop\', \'drop\'], but received unsupported augmentation method %s' % mode)
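# Hypothetical usage sketch (names are illustrative, not from the original
# module): crop- and drop-augment a batch with a single-channel attention
# map; both modes preserve the input shape.
def _example_batch_augment():
    images = torch.randn(4, 3, 224, 224)
    attention = torch.rand(4, 1, 14, 14)
    crops = batch_augment(images, attention, mode='crop', theta=(0.4, 0.6))
    drops = batch_augment(images, attention, mode='drop', theta=(0.2, 0.5))
    assert crops.shape == images.shape and drops.shape == images.shape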
class WSDAN_CAL(nn.Module):
def __init__(self, num_classes, M=32, net='inception_mixed_6e', pretrained=False):
super(WSDAN_CAL, self).__init__()
self.num_classes = num_classes
self.M = M
self.net = net
# Network Initialization
if 'inception' in net:
if net == 'inception_mixed_6e':
self.features = inception_v3(pretrained=pretrained).get_features_mixed_6e()
self.num_features = 768
elif net == 'inception_mixed_7c':
self.features = inception_v3(pretrained=pretrained).get_features_mixed_7c()
self.num_features = 2048
else:
raise ValueError('Unsupported net: %s' % net)
elif 'resnet' in net:
self.features = getattr(resnet, net)(pretrained=pretrained).get_features()
self.num_features = 512 * self.features[-1][-1].expansion
elif 'coat' in net:
self.features = getattr(coatnet, net)().get_features()
if '0' in net or '1' in net:
self.num_features = 768
elif '2' in net:
self.num_features = 1026
elif '3' in net or '4' in net:
self.num_features = 1536
else:
raise ValueError('Not given valid CoAtNet size.')
else:
raise ValueError('Unsupported net: %s' % net)
# Attention Maps
self.attentions = BasicConv2d(self.num_features, self.M, kernel_size=1)
# Bilinear Attention Pooling
self.bap = BAP(pool='GAP')
# Classification Layer
self.fc = nn.Linear(self.M * self.num_features, self.num_classes, bias=False)
logging.info('WSDAN: using {} as feature extractor, num_classes: {}, num_attentions: {}'.format(net, self.num_classes, self.M))
def visualize(self, x):
batch_size = x.size(0)
# Feature Maps, Attention Maps and Feature Matrix
feature_maps = self.features(x)
if self.net != 'inception_mixed_7c':
attention_maps = self.attentions(feature_maps)
else:
attention_maps = feature_maps[:, :self.M, ...]
# print(feature_maps.shape)
# print(attention_maps.shape)
feature_matrix = self.bap(feature_maps, attention_maps)[0]
p = self.fc(feature_matrix * 100.)
return p, attention_maps
def forward(self, x):
batch_size = x.size(0)
# Feature Maps, Attention Maps and Feature Matrix
feature_maps = self.features(x)
if self.net != 'inception_mixed_7c':
attention_maps = self.attentions(feature_maps)
else:
attention_maps = feature_maps[:, :self.M, ...]
feature_matrix, feature_matrix_hat = self.bap(feature_maps, attention_maps)
# Classification
p = self.fc(feature_matrix * 100.)
# Generate Attention Map
if self.training:
# Randomly choose one of attention maps Ak
attention_map = []
for i in range(batch_size):
attention_weights = torch.sqrt(attention_maps[i].sum(dim=(1, 2)).detach() + EPSILON)
attention_weights = F.normalize(attention_weights, p=1, dim=0)
k_index = np.random.choice(self.M, 2, p=attention_weights.cpu().numpy())
attention_map.append(attention_maps[i, k_index, ...])
attention_map = torch.stack(attention_map) # (B, 2, H, W) - one for cropping, the other for dropping
else:
attention_map = torch.mean(attention_maps, dim=1, keepdim=True) # (B, 1, H, W)
return p, p - self.fc(feature_matrix_hat * 100.), feature_matrix, attention_map
def load_state_dict(self, state_dict, strict=True):
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in state_dict.items()
if k in model_dict and model_dict[k].size() == v.size()}
if len(pretrained_dict) == len(state_dict):
print('%s: All params loaded' % type(self).__name__)
else:
print('%s: Some params were not loaded:' % type(self).__name__)
not_loaded_keys = [k for k in state_dict.keys() if k not in pretrained_dict.keys()]
print(('%s, ' * (len(not_loaded_keys) - 1) + '%s') % tuple(not_loaded_keys))
model_dict.update(pretrained_dict)
super(WSDAN_CAL, self).load_state_dict(model_dict)
hexsha: bce6db15719682d4f24dcfd6984365aab4377658 | path: tests/walls/analytic/plates.py | repo: noabauma/Mirheo @ bf7979bfbbf402d33c26ac5dc879f880e78e7017 | lang: Python (py) | size: 1,526 bytes | licenses: ["MIT"] | stars/issues/forks: null/null/1 (forked 2021-07-14)
#!/usr/bin/env python
import mirheo as mir
dt = 0.001
ranks = (1, 1, 1)
domain = (8, 16, 8)
force = (1.0, 0, 0)
density = 4
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=density)
u.registerParticleVector(pv=pv, ic=ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=50.0, kBT=1.0, power=0.5)
u.registerInteraction(dpd)
plate_lo = mir.Walls.Plane("plate_lo", (0, 0, -1), (0, 0, 1))
plate_hi = mir.Walls.Plane("plate_hi", (0, 0, 1), (0, 0, domain[2] - 1))
u.registerWall(plate_lo, 0)
u.registerWall(plate_hi, 0)
vv = mir.Integrators.VelocityVerlet("vv")
frozen = u.makeFrozenWallParticles(pvName="plates", walls=[plate_lo, plate_hi], interactions=[dpd], integrator=vv, number_density=density)
u.setWall(plate_lo, pv)
u.setWall(plate_hi, pv)
for p in (pv, frozen):
u.setInteraction(dpd, p, pv)
vv_dp = mir.Integrators.VelocityVerlet_withConstForce("vv_dp", force)
u.registerIntegrator(vv_dp)
u.setIntegrator(vv_dp, pv)
sample_every = 2
dump_every = 1000
bin_size = (1., 1., 0.5)
u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv], sample_every, dump_every, bin_size, ["velocities"], 'h5/solvent-'))
u.run(7002)
# nTEST: walls.analytic.plates
# cd walls/analytic
# rm -rf h5
# mir.run --runargs "-n 2" ./plates.py
# mir.avgh5 xy velocities h5/solvent-0000[4-7].h5 | awk '{print $1}' > profile.out.txt
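# Physics note (not part of the test itself): a DPD fluid driven by a
# constant body force between two parallel no-slip plates develops a
# parabolic (Poiseuille) velocity profile; the averaged 'velocities' field
# dumped above is what gets compared against that reference profile.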
hexsha: bceaba57987d2038b2b3f984d0fa700547f6902c | path: SIO_Code/SIO_coherence.py | repo: mmstoll/Ocean569_Code @ 228cb719f3e82f187f704f343d3b3590a38236d7 | lang: Python (py) | size: 12,224 bytes | licenses: ["MIT"] | stars/issues/forks: null/null/null
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m')
# remove uncertain data(SURF_FLAG between 1 and 4), replace with NaN, then interpolate
for i in range(0,len(sal_data['SURF_SAL_PSU'])):
if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
sal_data['SURF_SAL_PSU'][i] = np.nan
for i in range(0,len(temp_data['SURF_TEMP_C'])):
    # The original loop re-filtered the salinity columns here (a copy-paste
    # slip); per the comment above, flagged temperature values are replaced.
    if (temp_data['SURF_FLAG'][i] >= 1) and (temp_data['SURF_FLAG'][i] <= 4):
        temp_data['SURF_TEMP_C'][i] = np.nan
# interpolate missing temp and sal data
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]
# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean()
# remove trends from the sal and temp data and create new columns
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=len(temp_output))
# # 4. Subsample new SIO time series with same delta t as ENSO index (once per month)
# temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_len = temp_ifft_sampled[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()
# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency
b, a = signal.butter(4, w, 'low')
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)
temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251
# create dataframe with spectra for each variable
spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])
# for coherence, start all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1985]
# compute spectral variables for each variable
for j in range(0,4):
data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]]
freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
if j == 0:
spectra_temp_df['Temp_freq'] = freq
spectra_temp_df['Temp_spec'] = spec
spectra_temp_df['Temp_fft'] = fft
if j == 1:
spectra_sal_df['Sal_freq'] = freq
spectra_sal_df['Sal_spec'] = spec
spectra_sal_df['Sal_fft'] = fft
if j == 2:
spectra_PDO_df['PDO_freq'] = freq
spectra_PDO_df['PDO_spec'] = spec
spectra_PDO_df['PDO_fft'] = fft
if j == 3:
spectra_ENSO_df['ENSO_freq'] = freq
spectra_ENSO_df['ENSO_spec'] = spec
spectra_ENSO_df['ENSO_fft'] = fft
def band_average(fft_var1,fft_var2,frequency,n_av):
# fft_var1 and fft_var2 are the inputs computed via fft
# they can be the same variable or different variables
# n_av is the number of bands to be used for smoothing (nice if it is an odd number)
# this function is limited to 100,000 points but can easily be modified
nmax=100000
# T_length = (len(fft_var1) * 2 - 2)
# define some variables and arrays
n_spec=len(fft_var1)
n_av2=int(n_av//2+1) #number of band averages/2 + 1
spec_amp_av=np.zeros(nmax)
spec_phase_av=np.zeros(nmax)
freq_av=np.zeros(nmax)
# average the lowest frequency bands first (with half as many points in the average)
sum_low_amp=0.
sum_low_phase=0.
count=0
spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))#/(2.*np.pi*T_length*delt)
spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True) #/(2.*np.pi*T_length*delt) don't know if I need the 2pi/Tdeltt here...
#
for i in range(0,n_av2):
sum_low_amp+=spectrum_amp[i]
sum_low_phase+=spectrum_phase[i]
spec_amp_av[0]=sum_low_amp/n_av2
spec_phase_av[0]=sum_low_phase/n_av
# compute the rest of the averages
for i in range(n_av2,n_spec-n_av,n_av):
count+=1
spec_amp_est=np.mean(spectrum_amp[i:i+n_av])
spec_phase_est=np.mean(spectrum_phase[i:i+n_av])
freq_est=frequency[i+n_av//2]
spec_amp_av[count]=spec_amp_est
spec_phase_av[count]=spec_phase_est
freq_av[count]=freq_est
# omega0 = 2.*np.pi/(T_length*delt)
# contract the arrays
spec_amp_av=spec_amp_av[0:count]
spec_phase_av=spec_phase_av[0:count]
freq_av=freq_av[0:count]
return spec_amp_av,spec_phase_av,freq_av,count
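# Minimal usage sketch (illustrative, standalone): band-averaging an FFT
# against itself gives a smoothed auto-spectrum (phase ~ 0 everywhere);
# against another record's FFT it gives the cross-spectrum used for the
# coherence computations below.
def _example_band_average():
    f = np.fft.rfftfreq(1000, d=1.0)
    x_fft = np.fft.rfft(np.random.randn(1000))
    amp, phase, freq_av, count = band_average(x_fft, x_fft, f, 5)
    assert len(amp) == count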
n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)
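# Squared coherence = |cross-spectrum|^2 / (auto-spectrum_T * auto-spectrum_ENSO);
# values near 1 mean the two records co-vary strongly at that frequency.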
# define colors
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
freq_ann = 2*np.pi/365.25
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
n_av = 5
# define terms to compute coherence between temp and ENSO
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_df['PDO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
hexsha: bceb90c866742318115d3897625ab3cd17dad9ae | path: abfs/group_data_split.py | repo: rcdilorenzo/abfs @ a897d00a4589a9412a9b9e737f8db91df008fc26 | lang: Python (py) | size: 1,782 bytes | licenses: ["MIT"] | stars/issues/forks: 7/1/2
from collections import namedtuple as Struct
from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit
DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])
DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)
class GroupDataSplit():
def __init__(self, df, key, config=DEFAULT_SPLIT_CONFIG):
self.config = config
self.key = key
self._df = df
self._split_data()
@property
def total(self):
"""Total records in the data frame"""
return len(self._df)
def train_df(self):
"""Randomized train data frame"""
return self._train_df.sample(frac=1).reset_index(drop=True)
@property
def val_df(self):
"""Validation data frame"""
return self._val_df
@property
def test_df(self):
"""Test data frame"""
return self._test_df
@property
def test_split(self):
return GroupShuffleSplit(test_size=self.config.test_size,
random_state=self.config.random_seed).split
@property
def val_split(self):
val_size = self.config.validation_size / (1 - self.config.test_size)
return GroupShuffleSplit(test_size=val_size,
random_state=self.config.random_seed).split
def _split_data(self):
rem_indices, test_indices = next(
self.test_split(self._df, groups=self._df[self.key])
)
rem_df = self._df.iloc[rem_indices]
train_indices, val_indices = next(
self.val_split(rem_df, groups=rem_df[self.key])
)
self._test_df = self._df.iloc[test_indices]
self._val_df = rem_df.iloc[val_indices]
self._train_df = rem_df.iloc[train_indices]
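# Hypothetical usage sketch (the DataFrame here is illustrative): splitting
# on a group key guarantees that all rows sharing a key fall into exactly
# one of train/val/test.
def _example_group_split():
    import pandas as pd
    df = pd.DataFrame({'region': list('aabbccddee'), 'value': range(10)})
    split = GroupDataSplit(df, key='region')
    train = split.train_df()  # re-shuffled on each call
    assert not set(train['region']) & set(split.test_df['region'])
    assert not set(train['region']) & set(split.val_df['region'])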
hexsha: bcef12fc47d4a9fcc176c51b16eef241913a4acb | path: mmcls/models/utils/se_layer.py | repo: YuxinZou/mmclassification @ 2037260ea6c98a3b115e97727e1151a1c2c32f7a | lang: Python (py) | size: 2,989 bytes | licenses: ["Apache-2.0"] | stars/issues/forks: 1,190/702/502
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
squeeze_channels (None or int): The intermediate channel number of
SElayer. Default: None, means the value of ``squeeze_channels``
is ``make_divisible(channels // ratio, divisor)``.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will
be ``make_divisible(channels // ratio, divisor)``. Only used when
``squeeze_channels`` is None. Default: 16.
divisor (int): The divisor used to make the channel number divisible. Only
used when ``squeeze_channels`` is None. Default: 8.
conv_cfg (None or dict): Config dict for convolution layer. Default:
None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
If act_cfg is a dict, both activation layers will be configured
by this dict. If act_cfg is a sequence of dicts, the first
activation layer will be configured by the first dict and the
second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
"""
def __init__(self,
channels,
squeeze_channels=None,
ratio=16,
divisor=8,
bias='auto',
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
if squeeze_channels is None:
squeeze_channels = make_divisible(channels // ratio, divisor)
assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
'"squeeze_channels" should be a positive integer, but get ' + \
f'{squeeze_channels} instead.'
self.conv1 = ConvModule(
in_channels=channels,
out_channels=squeeze_channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=squeeze_channels,
out_channels=channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
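# Usage sketch (standalone, illustrative): the SE layer is shape-preserving;
# it squeezes spatially, excites through two 1x1 convs, and rescales the
# input channels by the resulting sigmoid gate.
def _example_se_layer():
    import torch
    se = SELayer(channels=32)
    x = torch.randn(2, 32, 16, 16)
    assert se(x).shape == x.shape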
hexsha: bcef9b7b7442550783a878ff705f2b12e8b4982b | path: instagram/admin.py | repo: James19stack/instagram-copy_cat @ 996a8678cec84a05e97d803356194cd112ee53e6 | lang: Python (py) | size: 605 bytes | licenses: ["MIT"] | stars/issues/forks: null/7/null
from django.contrib import admin
from .models import Images,Comments,Profile
# Register your models here.
class CommentInline(admin.TabularInline):
model=Comments
extra=3
class ImageInline(admin.ModelAdmin):
fieldsets=[
(None,{'fields':['image']}),
(None,{'fields':['image_name']}),
(None,{'fields':['image_caption']}),
(None,{'fields':['likes']}),
]
inlines=[CommentInline]
admin.site.site_header='InstaPost Admin'
admin.site.site_title='InstaPost Admin Dashboard'
admin.site.register(Images,ImageInline)
admin.site.register(Profile)
hexsha: bcf0d2ce383dabf5df66eb0e8657dcde75189cda | path: core/recognizer.py | repo: awen1988/yry @ b65ccd7062d60f605fc978a87e060d0015cf1d4c | lang: Python (py) | size: 8,894 bytes | licenses: ["Apache-2.0"] | stars/issues/forks: 129/10/35
"""
recognize face landmark
"""
import json
import os
import requests
import numpy as np
FACE_POINTS = list(range(0, 83))
JAW_POINTS = list(range(0, 19))
LEFT_EYE_POINTS = list(range(19, 29))
LEFT_BROW_POINTS = list(range(29, 37))
MOUTH_POINTS = list(range(37, 55))
NOSE_POINTS = list(range(55, 65))
RIGHT_EYE_POINTS = list(range(65, 75))
RIGHT_BROW_POINTS = list(range(75, 83))
LEFT_FACE = list(range(0, 10)) + list(range(29, 34))
RIGHT_FACE = list(range(9, 19)) + list(range(75, 80))
JAW_END = 19
FACE_START = 0
FACE_END = 83
OVERLAY_POINTS = [
LEFT_FACE,
RIGHT_FACE,
JAW_POINTS,
]
def face_points(image):
points = []
txt = image + '.txt'
if os.path.isfile(txt):
with open(txt) as file:
for line in file:
points = line
elif os.path.isfile(image):
points = landmarks_by_face__(image)
with open(txt, 'w') as file:
file.write(str(points))
faces = json.loads(points)['faces']
if len(faces) == 0:
    # No face detected: return empty results with an error code instead of
    # crashing on faces[0] below.
    return np.matrix([]), [], 404
err = 0
matrix_list = np.matrix(matrix_marks(faces[0]['landmark']))
point_list = []
for p in matrix_list.tolist():
point_list.append((int(p[0]), int(p[1])))
return matrix_list, point_list, err
def landmarks_by_face__(image):
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
params = {
'api_key': 'ZBbrYf41rX5AJ2mVDEcdIERF7HOlpG6t',
'api_secret': 'G5qlzXk7Wd9iE6MlORYPRulJ2lihdt9U',
'return_landmark': 1,
}
file = {'image_file': open(image, 'rb')}
r = requests.post(url=url, files=file, data=params)
if r.status_code == requests.codes.ok:
return r.content.decode('utf-8')
else:
return r.content
def matrix_rectangle(left, top, width, height):
pointer = [
(left, top),
(left + width / 2, top),
(left + width - 1, top),
(left + width - 1, top + height / 2),
(left, top + height / 2),
(left, top + height - 1),
(left + width / 2, top + height - 1),
(left + width - 1, top + height - 1)
]
return pointer
def matrix_marks(res):
pointer = [
[res['contour_left1']['x'], res['contour_left1']['y']],
[res['contour_left2']['x'], res['contour_left2']['y']],
[res['contour_left3']['x'], res['contour_left3']['y']],
[res['contour_left4']['x'], res['contour_left4']['y']],
[res['contour_left5']['x'], res['contour_left5']['y']],
[res['contour_left6']['x'], res['contour_left6']['y']],
[res['contour_left7']['x'], res['contour_left7']['y']],
[res['contour_left8']['x'], res['contour_left8']['y']],
[res['contour_left9']['x'], res['contour_left9']['y']],
[res['contour_chin']['x'], res['contour_chin']['y']],
[res['contour_right9']['x'], res['contour_right9']['y']],
[res['contour_right8']['x'], res['contour_right8']['y']],
[res['contour_right7']['x'], res['contour_right7']['y']],
[res['contour_right6']['x'], res['contour_right6']['y']],
[res['contour_right5']['x'], res['contour_right5']['y']],
[res['contour_right4']['x'], res['contour_right4']['y']],
[res['contour_right3']['x'], res['contour_right3']['y']],
[res['contour_right2']['x'], res['contour_right2']['y']],
[res['contour_right1']['x'], res['contour_right1']['y']],
[res['left_eye_bottom']['x'], res['left_eye_bottom']['y']],
[res['left_eye_center']['x'], res['left_eye_center']['y']],
[res['left_eye_left_corner']['x'], res['left_eye_left_corner']['y']],
[res['left_eye_lower_left_quarter']['x'], res['left_eye_lower_left_quarter']['y']],
[res['left_eye_lower_right_quarter']['x'], res['left_eye_lower_right_quarter']['y']],
[res['left_eye_pupil']['x'], res['left_eye_pupil']['y']],
[res['left_eye_right_corner']['x'], res['left_eye_right_corner']['y']],
[res['left_eye_top']['x'], res['left_eye_top']['y']],
[res['left_eye_upper_left_quarter']['x'], res['left_eye_upper_left_quarter']['y']],
[res['left_eye_upper_right_quarter']['x'], res['left_eye_upper_right_quarter']['y']],
[res['left_eyebrow_left_corner']['x'], res['left_eyebrow_left_corner']['y']],
[res['left_eyebrow_upper_left_quarter']['x'], res['left_eyebrow_upper_left_quarter']['y']],
[res['left_eyebrow_upper_middle']['x'], res['left_eyebrow_upper_middle']['y']],
[res['left_eyebrow_upper_right_quarter']['x'], res['left_eyebrow_upper_right_quarter']['y']],
[res['left_eyebrow_right_corner']['x'], res['left_eyebrow_right_corner']['y']],
[res['left_eyebrow_lower_left_quarter']['x'], res['left_eyebrow_lower_left_quarter']['y']],
[res['left_eyebrow_lower_middle']['x'], res['left_eyebrow_lower_middle']['y']],
[res['left_eyebrow_lower_right_quarter']['x'], res['left_eyebrow_lower_right_quarter']['y']],
[res['mouth_left_corner']['x'], res['mouth_left_corner']['y']],
[res['mouth_lower_lip_bottom']['x'], res['mouth_lower_lip_bottom']['y']],
[res['mouth_lower_lip_left_contour1']['x'], res['mouth_lower_lip_left_contour1']['y']],
[res['mouth_lower_lip_left_contour2']['x'], res['mouth_lower_lip_left_contour2']['y']],
[res['mouth_lower_lip_left_contour3']['x'], res['mouth_lower_lip_left_contour3']['y']],
[res['mouth_lower_lip_right_contour1']['x'], res['mouth_lower_lip_right_contour1']['y']],
[res['mouth_lower_lip_right_contour2']['x'], res['mouth_lower_lip_right_contour2']['y']],
[res['mouth_lower_lip_right_contour3']['x'], res['mouth_lower_lip_right_contour3']['y']],
[res['mouth_lower_lip_top']['x'], res['mouth_lower_lip_top']['y']],
[res['mouth_right_corner']['x'], res['mouth_right_corner']['y']],
[res['mouth_upper_lip_bottom']['x'], res['mouth_upper_lip_bottom']['y']],
[res['mouth_upper_lip_left_contour1']['x'], res['mouth_upper_lip_left_contour1']['y']],
[res['mouth_upper_lip_left_contour2']['x'], res['mouth_upper_lip_left_contour2']['y']],
[res['mouth_upper_lip_left_contour3']['x'], res['mouth_upper_lip_left_contour3']['y']],
[res['mouth_upper_lip_right_contour1']['x'], res['mouth_upper_lip_right_contour1']['y']],
[res['mouth_upper_lip_right_contour2']['x'], res['mouth_upper_lip_right_contour2']['y']],
[res['mouth_upper_lip_right_contour3']['x'], res['mouth_upper_lip_right_contour3']['y']],
[res['mouth_upper_lip_top']['x'], res['mouth_upper_lip_top']['y']],
[res['nose_contour_left1']['x'], res['nose_contour_left1']['y']],
[res['nose_contour_left2']['x'], res['nose_contour_left2']['y']],
[res['nose_contour_left3']['x'], res['nose_contour_left3']['y']],
[res['nose_contour_lower_middle']['x'], res['nose_contour_lower_middle']['y']],
[res['nose_contour_right1']['x'], res['nose_contour_right1']['y']],
[res['nose_contour_right2']['x'], res['nose_contour_right2']['y']],
[res['nose_contour_right3']['x'], res['nose_contour_right3']['y']],
[res['nose_left']['x'], res['nose_left']['y']],
[res['nose_right']['x'], res['nose_right']['y']],
[res['nose_tip']['x'], res['nose_tip']['y']],
[res['right_eye_bottom']['x'], res['right_eye_bottom']['y']],
[res['right_eye_center']['x'], res['right_eye_center']['y']],
[res['right_eye_left_corner']['x'], res['right_eye_left_corner']['y']],
[res['right_eye_lower_left_quarter']['x'], res['right_eye_lower_left_quarter']['y']],
[res['right_eye_lower_right_quarter']['x'], res['right_eye_lower_right_quarter']['y']],
[res['right_eye_pupil']['x'], res['right_eye_pupil']['y']],
[res['right_eye_right_corner']['x'], res['right_eye_right_corner']['y']],
[res['right_eye_top']['x'], res['right_eye_top']['y']],
[res['right_eye_upper_left_quarter']['x'], res['right_eye_upper_left_quarter']['y']],
[res['right_eye_upper_right_quarter']['x'], res['right_eye_upper_right_quarter']['y']],
[res['right_eyebrow_left_corner']['x'], res['right_eyebrow_left_corner']['y']],
[res['right_eyebrow_upper_left_quarter']['x'], res['right_eyebrow_upper_left_quarter']['y']],
[res['right_eyebrow_upper_middle']['x'], res['right_eyebrow_upper_middle']['y']],
[res['right_eyebrow_upper_right_quarter']['x'], res['right_eyebrow_upper_right_quarter']['y']],
[res['right_eyebrow_right_corner']['x'], res['right_eyebrow_right_corner']['y']],
[res['right_eyebrow_lower_left_quarter']['x'], res['right_eyebrow_lower_left_quarter']['y']],
[res['right_eyebrow_lower_middle']['x'], res['right_eyebrow_lower_middle']['y']],
[res['right_eyebrow_lower_right_quarter']['x'], res['right_eyebrow_lower_right_quarter']['y']],
]
return pointer
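# --- Hedged usage sketch (not part of the original module; the file name is hypothetical) ---
# face_points caches the raw Face++ JSON in '<image>.txt', so the API is only
# queried on the first call for a given image:
#
# matrix, points, err = face_points('face.jpg')
# if err == 0:
#     print('detected %d landmark points' % len(points))
# else:
#     print('no face detected (err=%d)' % err)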
| 47.308511
| 103
| 0.619744
| 1,233
| 8,894
| 4.082725
| 0.111922
| 0.065952
| 0.03973
| 0.050854
| 0.523441
| 0.369289
| 0.025824
| 0
| 0
| 0
| 0
| 0.020574
| 0.158421
| 8,894
| 187
| 104
| 47.561497
| 0.651971
| 0.002586
| 0
| 0.03871
| 0
| 0
| 0.438565
| 0.271014
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025806
| false
| 0
| 0.025806
| 0
| 0.083871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf263d3ef948ac8eb8afa3a601107434d608075
| 1,646
|
py
|
Python
|
magvar.py
|
rafidmorshedi/mag-dec-api
|
5daff929be8cad902f8db331090c0ed77f7bdef9
|
[
"MIT"
] | null | null | null |
magvar.py
|
rafidmorshedi/mag-dec-api
|
5daff929be8cad902f8db331090c0ed77f7bdef9
|
[
"MIT"
] | null | null | null |
magvar.py
|
rafidmorshedi/mag-dec-api
|
5daff929be8cad902f8db331090c0ed77f7bdef9
|
[
"MIT"
] | null | null | null |
import requests
import time
from bs4 import BeautifulSoup
import re
def decdeg2dms(dd):
negative = dd < 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
return (degrees,minutes,seconds)
def get_mag_var(lat, lon, year, month, day, elev=0):
"""Returns the magnetic variation at a particulat point on earth.
Keyword Arguments
lat -- latitude (e.g. -180.6 deg)
lon -- longitude (e.g. -34.6 deg)
elev -- elevation in km (default 0.0)
year -- year (e.g. 2015)
month -- month (e.g. 11)
day -- day (e.g. 30)
Returns
float -- magnetic variation
"""
(latd, latm, lats) = decdeg2dms(lat)
(lond, lonm, lons) = decdeg2dms(lon)
payload = {'latd': latd,'latm':latm,'lats':lats,'lond':lond,'lonm':lonm,
'lons':lons,'elev':elev,'year':year,'month':month,'day':day,'Ein':'D'}
url = 'http://www.ga.gov.au/oracle/cgi/geoAGRF.sh'
# Sleep to avoid spamming server
time.sleep(1)
r = requests.get(url, params=payload)
if r.status_code == 200:
c = r.content
soup = BeautifulSoup(c,'html.parser')
deg_text = soup.find_all('b')[-1].text.strip()
# strip out the junk so we have a number
# Strip spaces before the search
deg_text = deg_text.replace(" ","")
deg = re.search(r'D=(.*?)deg', deg_text).group(1)
deg = float(deg)
return deg
    else:
        # surface the HTTP failure instead of returning a misleading string
        raise RuntimeError('geoAGRF request failed with status {}'.format(r.status_code))
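# --- Hedged usage sketch (assumes the geoAGRF web service is reachable; the
# coordinates and date below are purely illustrative) ---
if __name__ == '__main__':
    # magnetic declination near Sydney for 30 Nov 2015
    dec = get_mag_var(-33.87, 151.21, 2015, 11, 30)
    print('magnetic variation: {} deg'.format(dec))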
| 29.392857
| 76
| 0.59113
| 230
| 1,646
| 4.195652
| 0.495652
| 0.010363
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.265492
| 1,646
| 56
| 77
| 29.392857
| 0.765922
| 0.241191
| 0
| 0.057143
| 0
| 0
| 0.10741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.114286
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf492dcec78d6b358e2430eb0bbca995c069560
| 5,054
|
py
|
Python
|
Deep-Learning/Crowd-Count/src/data_preprocess.py
|
sadbb/CVCode
|
c7c8b527af786d8f113122231e6296987b242b59
|
[
"Apache-2.0"
] | 1
|
2018-11-18T05:43:05.000Z
|
2018-11-18T05:43:05.000Z
|
Deep-Learning/Crowd-Count/src/data_preprocess.py
|
sadbb/CVCode
|
c7c8b527af786d8f113122231e6296987b242b59
|
[
"Apache-2.0"
] | null | null | null |
Deep-Learning/Crowd-Count/src/data_preprocess.py
|
sadbb/CVCode
|
c7c8b527af786d8f113122231e6296987b242b59
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# ------------------------
# written by Songjian Chen
# 2018-10
# ------------------------
import os
import skimage.io
from skimage.color import rgb2gray
import skimage.transform
from scipy.io import loadmat
import numpy as np
import cv2
import math
import warnings
import random
import torch
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
def gaussian_kernel(image, points):
image_density = np.zeros(image.shape)
h, w = image_density.shape
if len(points) == 0:
return image_density
for j in range(len(points)):
f_sz = 15
sigma = 4.0
# convert x, y to int
x = min(w, max(0, int(points[j, 0])))
y = min(h, max(0, int(points[j, 1])))
gap = f_sz // 2
x1 = x - gap if x - gap > 0 else 0
x2 = x + gap if x + gap < w else w - 1
y1 = y - gap if y - gap > 0 else 0
y2 = y + gap if y + gap < h else h - 1
# generate 2d gaussian kernel
kx = cv2.getGaussianKernel(y2 - y1 + 1, sigma=sigma)
ky = cv2.getGaussianKernel(x2 - x1 + 1, sigma=sigma)
gaussian = np.multiply(kx, ky.T)
image_density[y1:y2 + 1, x1:x2 + 1] += gaussian
return image_density
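# --- Hedged sanity check (not in the original file) ---
# Each annotated point contributes a unit-mass Gaussian (away from the image
# border), so the density map should sum to roughly the head count:
#
# img = np.zeros((64, 64))
# pts = np.array([[20, 20], [40, 30]])    # two hypothetical head positions
# print(gaussian_kernel(img, pts).sum())  # expected to be close to 2.0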
def extract_data(mode="train", patch_number=9, part="A"):
num_images = 300 if mode=="train" else 182
# original path
dataset_path = "../data/original/part_{0}_final/".format(part)
mode_data = os.path.join(dataset_path, "{0}_data".format(mode))
mode_images = os.path.join(mode_data, "images")
mode_ground_truth = os.path.join(mode_data, "ground_truth")
# preprocessed path
preprocessed_mode = "../data/preprocessed/{0}/".format(mode)
preprocessed_mode_density = "../data/preprocessed/{0}_density/".format(mode)
if not os.path.exists("../data/preprocessed/"):
os.mkdir("../data/preprocessed/")
if not os.path.exists(preprocessed_mode):
os.mkdir(preprocessed_mode)
if not os.path.exists(preprocessed_mode_density):
os.mkdir(preprocessed_mode_density)
# convert images to gray-density for each
for index in range(1, num_images + 1):
if index % 10 == 9:
print("{0} images have been processed".format(index + 1))
image_path = os.path.join(mode_images, "IMG_{0}.jpg".format(index))
ground_truth_path = os.path.join(mode_ground_truth, "GT_IMG_{0}.mat".format(index))
image = skimage.io.imread(image_path)
# convert to gray map
if image.shape[-1] == 3:
image = rgb2gray(image)
mat = loadmat(ground_truth_path)
image_info = mat["image_info"]
ann_points = image_info[0][0][0][0][0]
# gaussian transfer
image_density = gaussian_kernel(image, ann_points)
        # randomly sample patch_number patches, each 1/4 of the image size in each dimension
h, w = image.shape
w_block = math.floor(w / 8)
h_block = math.floor(h / 8)
for j in range(patch_number):
x = math.floor((w - 2 * w_block) * random.random() + w_block)
y = math.floor((h - 2 * h_block) * random.random() + h_block)
image_sample = image[y - h_block:y + h_block, x - w_block:x + w_block]
image_density_sample = image_density[y - h_block:y + h_block, x - w_block:x + w_block]
img_idx = "{0}_{1}".format(index, j)
np.save(os.path.join(preprocessed_mode_density, "{0}.npy".format(img_idx)), image_density_sample)
skimage.io.imsave(os.path.join(preprocessed_mode, "{0}.jpg".format(img_idx)), image_sample)
def extract_test_data(part="A"):
num_images = 183 if part == "A" else 317
test_data_path = "../data/original/part_{part}_final/test_data/images".format(part=part)
test_ground_path = "../data/original/part_{part}_final/test_data/ground_truth".format(part=part)
test_density_path = "../data/preprocessed/test_density"
print("create directory........")
if not os.path.exists(test_density_path):
os.mkdir(test_density_path)
print("begin to preprocess test data........")
for index in range(1, num_images):
if index % 10 == 0:
print("{num} images are done".format(num=index))
image_path = os.path.join(test_data_path, "IMG_{0}.jpg".format(index))
ground_truth_path = os.path.join(test_ground_path, "GT_IMG_{0}.mat".format(index))
# load mat and image
image = skimage.io.imread(image_path)
if image.shape[-1] == 3:
image = rgb2gray(image)
mat = loadmat(ground_truth_path)
image_info = mat["image_info"]
# ann_points: points pixels mean people
# number: number of people in the image
ann_points = image_info[0][0][0][0][0]
number = image_info[0][0][0][0][1]
h = float(image.shape[0])
w = float(image.shape[1])
# convert images to density
image_density = gaussian_kernel(image, ann_points)
np.save(os.path.join(test_density_path, "IMG_{0}.npy".format(index)), image_density)
extract_test_data()
| 38.287879
| 109
| 0.623467
| 740
| 5,054
| 4.07973
| 0.201351
| 0.027824
| 0.033124
| 0.006625
| 0.333885
| 0.251408
| 0.20636
| 0.141438
| 0.116926
| 0.10467
| 0
| 0.02869
| 0.234468
| 5,054
| 131
| 110
| 38.580153
| 0.751615
| 0.085675
| 0
| 0.166667
| 0
| 0
| 0.115577
| 0.059309
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.177083
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf6d0a350e5ace0a39c3f35ff8dbbc6f050f1f4
| 6,407
|
py
|
Python
|
shellmacros/istr.py
|
duaneellissd/shellmacros
|
33b5cd1a8794e35a9540f78dca066b8dfc289c97
|
[
"BSD-2-Clause"
] | null | null | null |
shellmacros/istr.py
|
duaneellissd/shellmacros
|
33b5cd1a8794e35a9540f78dca066b8dfc289c97
|
[
"BSD-2-Clause"
] | null | null | null |
shellmacros/istr.py
|
duaneellissd/shellmacros
|
33b5cd1a8794e35a9540f78dca066b8dfc289c97
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Created on Dec 27, 2019
@author: duane
'''
DOLLAR = ord('$')
LBRACE = ord('{')
RBRACE = ord('}')
LPAREN = ord('(')
RPAREN = ord(')')
class IStrFindResult(object):
OK = 0
NOTFOUND = 1
SYNTAX = 2
def __init__(self):
        self.code = IStrFindResult.SYNTAX
self.lhs = 0
self.rhs = 0
self.name = None
class IStr(list):
'''
This closely models a basic ASCII string
Note: Unicode strings are expressly not supported here.
The problem this addresses occurs during macro processing.
Sometimes macros are defined externally
Other times, macros are fully defined with a package.
Often macros need to be resolved either partially or fully
    When a macro is only external, it gets in the way of resolving other macros.
    To work around that, we convert the string into an array of integers,
    then for every macro byte that is 'external' we add 0x100.
    This makes the byte 'non-matchable'.
    Later, when we convert the resolved array back into a string, we strip the 0x100 flag.
'''
IGNORE = 0x100
def __init__(self, s):
'''
Constructor
'''
# convert to integers
list.__init__(self, map(ord, s))
def __str__(self):
# return as string, stripping flags
return ''.join(map(lambda v: chr(v & 0xff), self))
def sslice(self, lhs, rhs):
# return as string, stripping flags
return ''.join(map(lambda v: chr(v & 0xff), self[lhs:rhs]))
def iarray(self):
return self[:]
def mark(self, lhs, rhs, flagvalue=IGNORE):
'''
Apply flags to locations between left and right hand sides, ie: [lhs:rhs]
'''
for idx in range(lhs, rhs):
self[idx] |= flagvalue
def locate(self, needle, lhs, rhs):
'''Find this needle(char) in the hay stack(list).'''
        try:
            return self.index(needle, lhs, rhs)
        except ValueError:
            # not found
            return -1
def replace(self, lhs, rhs, newcontent):
'''replace the data between [lhs:rhs] with newcontent'''
self[lhs: rhs] = map(ord, newcontent)
def next_macro(self, lhs, rhs):
        '''
        Find a macro within [lhs:rhs] of the string.
        Returns an IStrFindResult whose .code is OK, NOTFOUND or SYNTAX;
        on success, .lhs/.rhs bracket the macro and .name holds its name.
        '''
result = IStrFindResult()
result.lhs = lhs
result.rhs = rhs
# if it is not long enough...
if (rhs - lhs) < 4:
result.code = result.NOTFOUND
return result
# We search for the CLOSING
# Consider nested: ${ ${foo}_${bar} }
# The first thing we must do is "foo"
# So find the close
tmp = self.locate(RBRACE, result.lhs,result.rhs)
if tmp >= 0:
_open_symbol = LBRACE
else:
            tmp = self.locate(RPAREN, result.lhs, result.rhs)
            _open_symbol = LPAREN  # a $(...) macro opens with LPAREN
if tmp < 0:
# not found
result.code = result.NOTFOUND
return result
# We want to end at RHS where the closing symbol is
result.rhs = tmp
while result.lhs < result.rhs:
# find DOLLAR
dollar_loc = self.locate(DOLLAR, result.lhs, result.rhs)
if dollar_loc < 0:
# above, we know we have a CLOSE
# We could call this a SYNTAX error
# but ... we won't we'll leave this as NOT FOUND
result.code = result.NOTFOUND
return result
# we have: DOLLAR + CLOSE
# Can we find DOLLAR + OPEN?
ch = self[dollar_loc+1]
if ch != _open_symbol:
# Nope... try again after dollar
result.lhs = dollar_loc+1
continue
result.lhs = dollar_loc
# Do we have a nested macro, ie: ${${x}}
tmp = self.locate(DOLLAR, dollar_loc + 1, result.rhs)
if tmp >= 0:
# we do have a nested macro
result.lhs = tmp
continue
# nope, we are good
# Everything between LHS and RHS should be a macro
result.code = result.OK
result.name = self.sslice(result.lhs + 2, result.rhs)
# the RHS should include the closing symbol
result.rhs += 1
return result
# not found syntax stray dollar or brace
result.code = result.SYNTAX
return result
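# --- Hedged usage sketch (not part of the original class; 'known' is a
# hypothetical macro table) ---
# A typical resolution loop: replace known macros, mark unknown (external)
# ones with IGNORE so they stop matching:
#
# def resolve(text, known):
#     s = IStr(text)
#     while True:
#         r = s.next_macro(0, len(s))
#         if r.code != IStrFindResult.OK:
#             break
#         if r.name in known:
#             s.replace(r.lhs, r.rhs, known[r.name])
#         else:
#             s.mark(r.lhs, r.rhs)   # external: hide it from later matching
#     return str(s)
#
# resolve('a${x}b', {'x': 'XYZ'})   # -> 'aXYZb'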
def test_istr():
def check2(l, r, text, dut):
print("----")
print("Check (%d,%d)" % (l, r))
print("s = %s" % str(dut))
print("i = %s" % dut.iarray())
result = dut.next_macro(0, len(dut))
if (result.lhs != l) or (result.rhs != r):
print("str = %s" % str(dut))
print("int = %s" % dut.iarray())
print("Error: (%d,%d) != (%d,%d)" % (l, r, result.lhs, result.rhs))
assert (False)
if text is not None:
assert( result.name == text )
dut.mark(l, r)
return dut
def check(l, r, s):
if l >= 0:
expected = s[l + 2:r - 1]
else:
expected = None
dut = IStr(s)
check2(l, r, expected, dut)
st = str(dut)
assert (st == s)
return dut
check(-1, -1, "")
check(-1, -1, "a")
check(-1, -1, "ab")
check(-1, -1, "abc")
check(-1, -1, "abcd")
check(-1, -1, "abcde")
check(-1, -1, "abcdef")
check(0, 4, "${a}")
check(0, 5, "${ab}")
check(0, 6, "${abc}")
check(0, 7, "${abcd}")
check(1, 5, "a${a}")
check(2, 6, "ab${a}")
check(3, 7, "abc${a}")
check(4, 8, "abcd${a}")
check(5, 9, "abcde${a}")
check(0, 4, "${a}a")
check(0, 4, "${a}ab")
check(0, 4, "${a}abc")
check(0, 4, "${a}abcd")
check(0, 4, "${a}abcde")
dut = check(4, 8, "abcd${a}xyz")
dut.replace(4, 8, "X")
check2(-1, -1, None, dut)
r = str(dut)
print("Got: %s" % r)
assert ("abcdXxyz" == str(dut))
# now nested tests
dut = check(5, 9, "abc${${Y}}xyz")
dut.replace(5, 9, "X")
r = str(dut)
assert (r == "abc${X}xyz")
dut = check2(3, 7, "${X}", dut)
dut.replace(3, 7, "ABC")
s = str(dut)
r = "abcABCxyz"
assert (s == r)
print("Success")
if __name__ == '__main__':
test_istr()
| 27.856522
| 81
| 0.517715
| 857
| 6,407
| 3.822637
| 0.271879
| 0.021978
| 0.014957
| 0.014652
| 0.107143
| 0.076313
| 0.076313
| 0.064713
| 0.064713
| 0.03663
| 0
| 0.024922
| 0.348681
| 6,407
| 229
| 82
| 27.978166
| 0.760125
| 0.26674
| 0
| 0.132353
| 0
| 0
| 0.059801
| 0
| 0
| 0
| 0.002879
| 0
| 0.044118
| 1
| 0.088235
| false
| 0
| 0
| 0.022059
| 0.220588
| 0.066176
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf76125149120b7d959b455bacb0c98cf4095f0
| 7,712
|
py
|
Python
|
cli.py
|
checktheroads/deenis
|
2581e2fcbb08a9c85590bd54e109f24cc87b664f
|
[
"WTFPL"
] | 4
|
2019-07-18T18:16:31.000Z
|
2020-02-28T08:39:58.000Z
|
cli.py
|
checktheroads/deenis
|
2581e2fcbb08a9c85590bd54e109f24cc87b664f
|
[
"WTFPL"
] | null | null | null |
cli.py
|
checktheroads/deenis
|
2581e2fcbb08a9c85590bd54e109f24cc87b664f
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python3
"""
CLI for Accessing Deenis
"""
# Standard Imports
import sys
from pathlib import Path
# Module Imports
import click
# Path Fixes
working_dir = Path(__file__).resolve().parent
sys.path.append(str(working_dir))
# Project Imports
from deenis import Deenis
@click.group(
help=(
"Deenis can be used to group and automate boring DNS tasks. For example, "
"`host` can take a hostname, IPv4 Address, and IPv6 Address, and create "
"forward A & AAAA, and reverse PTR records (4 actions) with a single command."
)
)
def add_records():
"""Click Command Group Definition"""
# pylint: disable=unnecessary-pass
# Dear Pylint: This is how Click likes to do things. Get over it bruh.
pass
@add_records.command("host", help="Add a Host Record")
@click.option("-c", "--config-file", "config_file", help="Path to YAML Config File")
@click.option("-4", "--ipv4-address", "ipv4", default=None, help="IPv4 Address")
@click.option("-6", "--ipv6-address", "ipv6", default=None, help="IPv6 Address")
@click.option("-f", "--fqdn", "fqdn", required=True, help="FQDN")
def host(**click_input):
"""Add host records from CLI"""
if not click_input["config_file"]:
config_path = Path.cwd().joinpath("deenis.yaml")
if not config_path.exists():
raise click.UsageError(
click.style(
(
f"Config file not specified and not found at {config_path}. "
"Please specify a config file path."
),
fg="red",
bold=True,
)
)
elif click_input["config_file"]:
config_path = Path().resolve(click_input["config_file"])
if not click_input["ipv4"] and not click_input["ipv6"]:
raise click.UsageError(
click.style("At least one IP Address is required", fg="red", bold=True)
)
try:
responses = Deenis(str(config_path)).AddHost(
{
"hostname": click_input["fqdn"],
"ipv4": click_input["ipv4"],
"ipv6": click_input["ipv6"],
}
)
if responses:
for res in responses:
status, record_record, record, target, errors = res
if status == "Success":
click.echo(
"Added "
+ click.style(record_record, fg="green", bold=True)
+ " Record for "
+ click.style(record, fg="yellow", bold=True)
+ " Pointing to "
+ click.style(target, fg="blue", bold=True)
)
elif status == "Failure":
click.echo(
"Error Adding "
+ click.style(record_record, fg="magenta", bold=True)
+ " Record for "
+ click.style(record, fg="cyan", bold=True)
+ " Pointing to "
+ click.style(target, fg="red", bold=True)
+ f"\nErrors:\n"
)
for err in errors:
click.secho(err, fg="red")
if not responses:
click.secho("\nNo records were added", fg="magenta", bold=True)
except (RuntimeError, AttributeError) as error_exception:
raise click.UsageError(click.style(str(error_exception), fg="red", bold=True))
@add_records.command("tenant", help="Bulk Add PTR Records for a Tenant/Customer")
@click.option("-c", "--config-file", "config_file", help="Path to YAML Config File")
@click.option(
"-i", "--crm-id", "crm_id", default=None, help="Unique Tenant Indentifier"
)
@click.option(
"-4", "--ipv4-prefix", "prefix4", default=None, help="IPv4 Prefix Assignment"
)
@click.option(
"-6", "--ipv6-prefix", "prefix6", default=None, help="IPv6 Prefix Assignment"
)
@click.option(
"-f4", "--ipv4-fqdn", "host4", default=None, help="FQDN for IPv4 PTR Target"
)
@click.option(
"-f6", "--ipv6-fqdn", "host6", default=None, help="FQDN for IPv6 PTR Target"
)
def tenant_reverse(**click_input):
"""Add Tenant Records from CLI"""
if not click_input["config_file"]:
config_path = Path.cwd().joinpath("deenis.yaml")
if not config_path.exists():
raise click.UsageError(
click.style(
(
f"Config file not specified and not found at {config_path}. "
"Please specify a config file path."
),
fg="red",
bold=True,
)
)
elif click_input["config_file"]:
config_path = Path().resolve(click_input["config_file"])
if not click_input["prefix4"] and not click_input["prefix6"]:
raise click.UsageError(
click.style("At least one prefix is required", fg="red", bold=True)
)
try:
responses = Deenis(str(config_path)).TenantReverse(
{
"crm_id": click_input["crm_id"],
"host4": click_input["host4"],
"host6": click_input["host6"],
"prefix4": click_input["prefix4"],
"prefix6": click_input["prefix6"],
}
)
"""
Response format:
[
(
'Success',
'A',
'test011.omnificent.io',
'199.34.95.250',
[]
),
(
'Success',
'PTR',
'250',
'test011.omnificent.io',
[]
)
]
"""
nl = "\n"
tab = " "
_text = {"fg": "white", "bold": True}
_stat_suc = {"fg": "green", "bold": True}
_stat_fail = {"fg": "red", "bold": True}
_rec_type = {"fg": "yellow", "bold": True}
_rec_name = {"fg": "magenta", "bold": True}
_rec_trgt = {"fg": "cyan", "bold": True}
_error = {"fg": "red"}
click.secho(nl + "Records:" + nl, **_text)
for res in responses:
status, rec_type, rec_name, rec_trgt, errors = res
if status == "Success":
_status = ("⚡ " + status, _stat_suc)
elif status == "Failure":
_status = ("☝ " + status, _stat_fail)
click.echo(
tab
+ click.style(_status[0], **_status[1])
+ nl
+ tab * 4
+ click.style(rec_type, **_rec_type)
+ click.style(" ⟫ ", **_text)
+ click.style(rec_name, **_rec_name)
+ click.style(" ⟩ ", **_text)
+ click.style(rec_trgt, **_rec_trgt)
)
if errors:
click.echo(tab * 4 + click.style("Errors: ", **_stat_fail))
for err in errors:
if isinstance(err, dict):
for ename in err.keys():
click.echo(
tab * 6
+ click.style(str(ename) + ":", **_error)
+ tab
+ click.style(str(err[ename]), **_error)
)
elif isinstance(err, str):
click.echo(tab * 4 + click.style(err, **_error))
except (AttributeError, RuntimeError) as tenant_error:
raise click.ClickException(tenant_error)
if __name__ == "__main__":
add_records()
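# --- Hedged test sketch (uses click's standard CliRunner; the config path and
# addresses below are hypothetical) ---
# from click.testing import CliRunner
# runner = CliRunner()
# result = runner.invoke(add_records, ['host', '-f', 'www.example.com',
#                                      '-4', '192.0.2.10', '-c', 'deenis.yaml'])
# print(result.output)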
| 36.72381
| 86
| 0.490794
| 803
| 7,712
| 4.585305
| 0.232877
| 0.057034
| 0.028517
| 0.024715
| 0.355785
| 0.297121
| 0.284628
| 0.284628
| 0.224335
| 0.224335
| 0
| 0.014309
| 0.374741
| 7,712
| 209
| 87
| 36.899522
| 0.748445
| 0.037863
| 0
| 0.305389
| 0
| 0
| 0.203714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017964
| false
| 0.005988
| 0.023952
| 0
| 0.041916
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf7f47be4d0d789e4869009ef9f2f68c5ab3b33
| 5,383
|
py
|
Python
|
main_cl.py
|
spiolynn/pybo
|
186495de315eb8ec47a996de959574f9864da7c4
|
[
"MIT"
] | null | null | null |
main_cl.py
|
spiolynn/pybo
|
186495de315eb8ec47a996de959574f9864da7c4
|
[
"MIT"
] | null | null | null |
main_cl.py
|
spiolynn/pybo
|
186495de315eb8ec47a996de959574f9864da7c4
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from bigone import BigOneDog
from common import gen_logger
import logging
import time
import json
def strategy_eth_big_bnc_eth(dog):
"""
正向:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC
反向:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH
:param dog: implemention of BigOneDog
:return: 正向收益率,反向收益率
"""
big_eth_data = dog.get_order_book('BIG-ETH')
big_bnc_data = dog.get_order_book('BIG-BNC')
eth_bnc_data = dog.get_order_book('ETH-BNC')
    print('BIG-ETH')
    print('best ask', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount'])
    print('best bid', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount'])
    print('BIG-BNC')
    print('best ask', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount'])
    print('best bid', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount'])
    print('ETH-BNC')
    print('best ask', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('best bid', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
# positive transaction
pos_anc = 0.999*0.999*0.999*\
((1 / (float(big_eth_data['asks'][0]['price'])))
* float(big_bnc_data['bids'][0]['price']) )
pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1
# negative transaction
neg_anc = 0.999 * 0.999 * 0.999 * \
(float(eth_bnc_data['bids'][0]['price'])
/ float(big_bnc_data['asks'][0]['price'])
* float(big_eth_data['asks'][0]['price']))
neg_anc = neg_anc / 1 - 1
flag = False
amt = 2.0
if float(big_eth_data['asks'][0]['amount']) >= amt:
if float(big_bnc_data['bids'][0]['amount']) >= amt:
if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']):
flag = True
msg = "预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润:"
if pos_anc < 0.01:
result = "利润空间小于1%, 放弃本次套利 0"
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "利润空间大于1%"
if flag is False:
result = "{},{}".format(result,"量不足, 放弃本次套利 0")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "{},{}".format(result,"执行本次套利 1")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
print("{} {} {} {}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt)))
print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt)))
print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'],
str(amt * float(big_eth_data['asks'][0]['price']))))
# dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0')
# dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0')
# dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'],
# str(2.0 * float(big_eth_data['asks'][0]['price'])))
return True
    if neg_anc < 0.01:
        result = "profit margin below 1%, skipping this arbitrage 0"
    else:
        result = "profit margin above 1%, executing this arbitrage 1"
    logger.info("Expected profit for this [reverse arbitrage: sell ETH/BNC -> buy BIG/BNC -> sell BIG/ETH]: {0:.2f}%, {1}".format(neg_anc * 100, result))
return False
# return pos_anc, neg_anc
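# --- Hedged worked example (illustrative quotes, not live market data) ---
# Forward cycle with a 0.1% fee per leg (hence the 0.999**3 factor): spend
# 1 ETH -> buy 1/ask(BIG-ETH) BIG -> sell at bid(BIG-BNC) -> buy ETH back at
# ask(ETH-BNC). With ask(BIG-ETH)=0.001, bid(BIG-BNC)=0.9, ask(ETH-BNC)=880:
#   0.999**3 * ((1 / 0.001) * 0.9) / 880 - 1  ~= 0.0197  (about 2%)
# which clears the 1% threshold used in strategy_eth_big_bnc_eth above.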
def strategy_eth_bnc(dog):
eth_bnc_data = dog.get_order_book('ETH-BNC')
print('ETH-BNC')
print('卖一', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
print('买一', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
anc = float(eth_bnc_data['asks'][0]['price']) / float(eth_bnc_data['bids'][0]['price']) - 1
print(anc)
if anc > 0.02:
r = dog.create_order('ETH-BNC', 'BID', str(float(eth_bnc_data['bids'][0]['price'])+0.01), '0.01' )
bid_order_id = r['order_id']
r = dog.create_order('ETH-BNC', 'ASK', str(float(eth_bnc_data['asks'][0]['price'])-0.01), '0.01' )
ask_order_id = r['order_id']
return anc, anc
if __name__ == '__main__':
gen_logger('bigonetest')
logger = logging.getLogger("bigone")
with open("PRIVATE_KEY.json",'r') as f:
private_key = json.load(f)["key"]
dog = BigOneDog(private_key)
# strategy_eth_bnc(dog)
# dog.get_orders("ETH-BNC",'10')
# r = dog.get_order("b79ef031-c477-46f9-b452-7e97aa97435d")
# print(r)
# r = dog.get_orders('ETH-BNC','10')
# print(r)
while True:
flag = strategy_eth_big_bnc_eth(dog)
if flag is True:
break
else:
print("休眠10秒")
print("")
time.sleep(10)
# break
    # pos_anc, neg_anc = strategy_eth_bnc(dog)
    # if pos_anc < 0.01:
    #     result = "profit margin below 1%, skipping this arbitrage 0"
    # else:
    #     result = "profit margin above 1%, executing this arbitrage 1"
    #
    # logger.info("Expected profit for this [forward arbitrage: buy BIG/ETH -> sell BIG/BNC -> buy ETH/BNC]: {0:.2f}%, {1}".format(pos_anc*100, result))
    #
    # if neg_anc < 0.01:
    #     result = "profit margin below 1%, skipping this arbitrage 0"
    # else:
    #     result = "profit margin above 1%, executing this arbitrage 1"
    #
    # logger.info("Expected profit for this [reverse arbitrage: sell ETH/BNC -> buy BIG/BNC -> sell BIG/ETH]: {0:.2f}%, {1}".format(neg_anc*100, result))
    #
    # print("sleeping for 10 seconds")
    # print("")
    # time.sleep(10)
| 35.886667
| 112
| 0.546907
| 774
| 5,383
| 3.605943
| 0.135659
| 0.068793
| 0.074167
| 0.085274
| 0.714439
| 0.677893
| 0.460408
| 0.335722
| 0.295593
| 0.264421
| 0
| 0.047108
| 0.2389
| 5,383
| 149
| 113
| 36.127517
| 0.634123
| 0.211964
| 0
| 0.204819
| 0
| 0.012048
| 0.192298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0
| 0.060241
| 0
| 0.120482
| 0.216867
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf86b7e6462408e17d610983a6cb23985d20fe4
| 1,191
|
py
|
Python
|
run_experiments.py
|
gahaalt/cifar-vs-tensorflow2
|
547d131382438ef76e315dde06a6870737f1fbad
|
[
"MIT"
] | 6
|
2019-11-15T08:42:29.000Z
|
2021-03-04T11:58:39.000Z
|
run_experiments.py
|
gahaalt/cifar-vs-tensorflow2
|
547d131382438ef76e315dde06a6870737f1fbad
|
[
"MIT"
] | null | null | null |
run_experiments.py
|
gahaalt/cifar-vs-tensorflow2
|
547d131382438ef76e315dde06a6870737f1fbad
|
[
"MIT"
] | 3
|
2020-11-25T03:44:41.000Z
|
2021-03-08T04:45:56.000Z
|
import os
import yaml
import logging
import importlib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
from cifar_training_tools import cifar_training, cifar_error_test
def print_dict(d, tabs=0):
tab = '\t'
for key in d:
        if isinstance(d[key], dict):
print(f"{tab*tabs}{key}:")
print_dict(d[key], tabs+1)
else:
print(f"{tab*tabs}{key}: {d[key]}")
print('\n' + '#' * 19)
print("TESTING FOR ERRORS!")
print('#' * 19)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
if 'skip_error_test' in exp and exp['skip_error_test']:
continue
model = getattr(importlib.import_module(exp['module']), exp['model'])
cifar_error_test(model(**exp['model_parameters']))
print("OK!")
print('\n' + '#' * 22)
print("MODEL TRAINING BEGINS!")
print('#' * 22)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
    print()
    print_dict(exp)
    print()
model = getattr(importlib.import_module(exp['module']), exp['model'])
cifar_training(model(**exp['model_parameters']), **exp['train_parameters'])
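# --- Hedged example config (the module/model names are hypothetical) ---
# Each YAML document in experiments.yaml describes one experiment; the loops
# above import exp['model'] from exp['module'] and forward both parameter
# dicts. A sketch of the assumed layout:
#
# module: models
# model: ResNet20
# model_parameters:
#   num_classes: 10
# train_parameters:
#   epochs: 2
# skip_error_test: false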
| 27.697674
| 79
| 0.628044
| 164
| 1,191
| 4.390244
| 0.371951
| 0.05
| 0.038889
| 0.036111
| 0.35
| 0.305556
| 0.305556
| 0.305556
| 0.305556
| 0.305556
| 0
| 0.011482
| 0.195634
| 1,191
| 43
| 79
| 27.697674
| 0.740084
| 0
| 0
| 0.181818
| 0
| 0
| 0.218121
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.212121
| 0
| 0.242424
| 0.363636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcf9bd066aefdc4f6abca126693e2677662eb927
| 1,542
|
py
|
Python
|
histdata/mt5db/script_DownloadAndStoreToMongodb.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | 1
|
2018-07-02T13:54:49.000Z
|
2018-07-02T13:54:49.000Z
|
histdata/mt5db/script_DownloadAndStoreToMongodb.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | null | null | null |
histdata/mt5db/script_DownloadAndStoreToMongodb.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | 3
|
2016-05-28T15:13:02.000Z
|
2021-04-10T06:04:25.000Z
|
# -*- coding: utf-8 -*-
import os,sys
from PyQt4 import QtGui,QtCore
dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
sys.path.append(dataRoot)
import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt
def getSymbols():
    #mid 1) the symbol data below was copy-pasted from Excel
codesStr = """
XAGUSD
"""
    #mid 2) split() turns the string into a list; by default it strips \n and all whitespace
#codeList = ['000021.SZ','000022.SZ']
codeList = [code.split('.')[0] for code in codesStr.split()]
return codeList
def subMain():
DC = dataCenter.dataCenter()
remoteDataSourceType = 'mt5'
localStorageType = 'mongodb'
periodType = 'D'
timeStart = dt.datetime(2000,10,20)
timeEnd = dt.datetime.now()
# 1)get codes form eastmoney
codeList = getSymbols()
# 2)download history data
dataDict = DC.downloadHistData(providerType=remoteDataSourceType,storageType=localStorageType,periodType=periodType,
codeList=codeList,timeFrom = timeStart,timeTo = timeEnd)
if __name__ == '__main__':
#app = QtGui.QApplication(sys.argv)
#mid-----------------------------------------------------------------------------------------------------------------------------
subMain()
#mid-----------------------------------------------------------------------------------------------------------------------------
#sys.exit(app.exec_())
| 37.609756
| 133
| 0.527237
| 136
| 1,542
| 5.882353
| 0.595588
| 0.0225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023045
| 0.212062
| 1,542
| 41
| 134
| 37.609756
| 0.635391
| 0.311933
| 0
| 0
| 0
| 0
| 0.064176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.2
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcfa7e8108972dea4c27619df4c1be7b06458b6e
| 3,813
|
py
|
Python
|
main.py
|
brunotoshio/castella
|
ad418bd1beb4953687a4ad7be586b12631c25992
|
[
"MIT"
] | 2
|
2020-02-18T09:41:43.000Z
|
2020-02-20T11:03:03.000Z
|
main.py
|
brunotoshio/castella
|
ad418bd1beb4953687a4ad7be586b12631c25992
|
[
"MIT"
] | null | null | null |
main.py
|
brunotoshio/castella
|
ad418bd1beb4953687a4ad7be586b12631c25992
|
[
"MIT"
] | null | null | null |
import pymongo
import yaml
import sched
import time
import json
from castella import TweetCrawler
class Castella(object):
def __init__(self):
# Get connection parameters
with open("settings.yml", "r") as stream:
try:
settings = yaml.safe_load(stream)["settings"]
# Database
self.server_url = settings["output"]["database"]["url"]
self.server_port = settings["output"]["database"]["port"]
self.database_name = settings["output"]["database"]["database"]
self.collection_name = settings["output"]["database"]["collection"]
# Search
self.query = settings["search"]["query"]
self.search_params = settings["search"]["params"]
# Schedule
self.interval_type = settings["interval"]["each"]
self.interval_amount = settings["interval"]["amount"]
self.total_executions = 0
            except yaml.YAMLError as exc:
                print("ERROR: settings.yml could not be read:", exc)
def execute_search(self):
# Mongo connection
client = pymongo.MongoClient(self.server_url, self.server_port)
db = client[self.database_name]
self.tweets = db[self.collection_name]
self._create_scheduled_executions()
def _save_tweet(self, tweet):
print("Saving: ", tweet._json["id_str"])
try:
bson = tweet._json
bson["query_str"] = self.query
self.tweets.insert_one(bson)
        except Exception:
print("Error occurred when trying to save")
def _search(self):
# Continue from last id
try:
self.tweets.create_index([("id", pymongo.DESCENDING)])
last_tweet = self.tweets.find({}).sort([("id", pymongo.DESCENDING)]).next()
except StopIteration:
last_tweet = None
# Searching
tc = TweetCrawler()
params = dict(result_type="recent", include_entities=True, count=100)
if isinstance(self.search_params, dict):
params.update(self.search_params)
if last_tweet is not None:
print("============================================================")
print("Resuming from tweet id:", last_tweet['id_str'])
print("============================================================")
params["since_id"] = last_tweet.get("id_str")
tc.search(self.query, self._save_tweet, params)
self.total_executions += 1
print("============================================================")
print("Finished for today...")
print(self.total_executions, "out of", self.interval_amount, "scheduled executions")
print("============================================================")
if self.total_executions < self.interval_amount:
print("Keep this process running until the execution of the last scheduled iteration, or stop this process to cancel further executions.")
print("============================================================")
# Preparing functions for scheduler
def _days(self):
return time.time() / (60 * 60 * 24)
def _weeks(self):
return time.time() / (60 * 60 * 24 * 7)
# Scheduling events
def _create_scheduled_executions(self):
if self.interval_type == "day":
handler = self._days
else:
handler = self._weeks
scheduler = sched.scheduler(handler, time.sleep)
for i in range(self.interval_amount):
scheduler.enter(i, 1, self._search)
scheduler.run()
if __name__ == "__main__":
searcher = Castella()
searcher.execute_search()
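# --- Hedged example settings.yml (keys mirror the reader in __init__; all
# values are hypothetical) ---
# settings:
#   output:
#     database:
#       url: localhost
#       port: 27017
#       database: twitter
#       collection: tweets
#   search:
#     query: "#python"
#     params:
#       lang: en
#   interval:
#     each: day        # or "week"
#     amount: 7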
| 37.019417
| 150
| 0.540782
| 383
| 3,813
| 5.206266
| 0.370757
| 0.036108
| 0.044132
| 0.017051
| 0.024072
| 0.024072
| 0.024072
| 0
| 0
| 0
| 0
| 0.006877
| 0.275374
| 3,813
| 103
| 151
| 37.019417
| 0.714803
| 0.039601
| 0
| 0.106667
| 0
| 0.013333
| 0.218998
| 0.082124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0
| 0.08
| 0.026667
| 0.213333
| 0.16
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcfabdd28c428dd3bd0fa4eb4f234286130b7db0
| 1,275
|
py
|
Python
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
#
# slice paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys
data_type = 'float32'
def slice(name : str, x, axes : list, start : list, end : list):
pdpd.enable_static()
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def main():
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type)
slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3))
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type)
slice("slice_1d", x, axes=[0], start=[0], end=[1])
if __name__ == "__main__":
main()
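# --- Hedged cross-check (numpy only; not in the original script) ---
# The first slice call above (axes=[1, 2], start=(0, 1), end=(-1, 3)) on a
# (4, 3, 5) tensor matches plain numpy indexing x[:, 0:-1, 1:3]:
#
# x = np.linspace(1, 60, num=60, dtype=np.int32).reshape(4, 3, 5)
# print(x[:, 0:-1, 1:3].shape)   # (4, 2, 2)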
| 32.692308
| 116
| 0.622745
| 191
| 1,275
| 4.036649
| 0.408377
| 0.090791
| 0.066148
| 0.031128
| 0.16083
| 0.098573
| 0.098573
| 0.098573
| 0.098573
| 0.098573
| 0
| 0.036963
| 0.214902
| 1,275
| 39
| 117
| 32.692308
| 0.733267
| 0.075294
| 0
| 0
| 0
| 0
| 0.026383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcfca1189da0e63d3e685ea19031e90196e49d8d
| 719
|
py
|
Python
|
testfixtures/compat.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
testfixtures/compat.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
testfixtures/compat.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
# compatibility module for different python versions
import sys
if sys.version_info[:2] > (3, 0):
PY2 = False
PY3 = True
Bytes = bytes
Unicode = str
basestring = str
class_type_name = 'class'
ClassType = type
exception_module = 'builtins'
new_class = type
self_name = '__self__'
from io import StringIO
xrange = range
else:
PY2 = True
PY3 = False
Bytes = str
Unicode = unicode
basestring = basestring
class_type_name = 'type'
from types import ClassType
exception_module = 'exceptions'
from new import classobj as new_class
self_name = 'im_self'
from cStringIO import StringIO
xrange = xrange
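# --- Hedged usage sketch (not part of the original module) ---
# Downstream code stays version-agnostic by importing the aliases:
#
# from testfixtures.compat import StringIO, xrange
# buf = StringIO()
# for i in xrange(3):
#     buf.write(u'%d' % i)
# assert buf.getvalue() == u'012'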
| 18.435897
| 52
| 0.635605
| 85
| 719
| 5.188235
| 0.494118
| 0.061224
| 0.058957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014
| 0.30459
| 719
| 38
| 53
| 18.921053
| 0.868
| 0.069541
| 0
| 0
| 0
| 0
| 0.062969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.185185
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcfd55447233f3a98240a98d95e5f9301c8b38ec
| 3,898
|
py
|
Python
|
old_py2/tests/models_tests/notifications/test_match_score.py
|
ofekashery/the-blue-alliance
|
df0e47d054161fe742ac6198a6684247d0713279
|
[
"MIT"
] | 266
|
2015-01-04T00:10:48.000Z
|
2022-03-28T18:42:05.000Z
|
tests/models_tests/notifications/test_match_score.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 2,673
|
2015-01-01T20:14:33.000Z
|
2022-03-31T18:17:16.000Z
|
tests/models_tests/notifications/test_match_score.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 230
|
2015-01-04T00:10:48.000Z
|
2022-03-26T18:12:04.000Z
|
import re
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.notification_type import NotificationType
from helpers.event.event_test_creator import EventTestCreator
from models.team import Team
from models.notifications.match_score import MatchScoreNotification
class TestMatchScoreNotification(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
for team_number in range(6):
Team(id="frc%s" % team_number,
team_number=team_number).put()
self.event = EventTestCreator.createPresentEvent()
self.match = self.event.matches[0]
self.notification = MatchScoreNotification(self.match)
def tearDown(self):
self.testbed.deactivate()
def test_type(self):
self.assertEqual(MatchScoreNotification._type(), NotificationType.MATCH_SCORE)
def test_fcm_notification(self):
self.assertIsNotNone(self.notification.fcm_notification)
self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results')
match_regex = re.compile(r'^\d+, \d+, \d+ beat \d+, \d+, \d+ scoring \d+-\d+.$')
match = re.match(match_regex, self.notification.fcm_notification.body)
self.assertIsNotNone(match)
def test_fcm_notification_tied(self):
score = self.notification.match.alliances['red']['score']
self.notification.match.alliances['blue']['score'] = score
self.assertIsNotNone(self.notification.fcm_notification)
self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results')
match_regex = re.compile(r'^\d+, \d+, \d+ tied with \d+, \d+, \d+ scoring \d+-\d+.$')
match = re.match(match_regex, self.notification.fcm_notification.body)
self.assertIsNotNone(match)
def test_fcm_notification_team(self):
team = Team.get_by_id('frc1')
notification = MatchScoreNotification(self.match, team)
self.assertEqual(notification.fcm_notification.title, 'Team 1 TESTPRESENT Q1 Results')
def test_data_payload(self):
payload = self.notification.data_payload
self.assertEqual(len(payload), 2)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name))
def test_data_payload_team(self):
team = Team.get_by_id('frc1')
notification = MatchScoreNotification(self.match, team)
payload = notification.data_payload
self.assertEqual(len(payload), 3)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name))
self.assertEqual(payload['team_key'], 'frc1')
def test_webhook_message_data(self):
# Has `event_name`
payload = self.notification.webhook_message_data
self.assertEqual(len(payload), 3)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['event_name'], 'Present Test Event')
self.assertIsNotNone(payload['match'])
def test_webhook_message_data_team(self):
team = Team.get_by_id('frc1')
notification = MatchScoreNotification(self.match, team)
payload = notification.webhook_message_data
self.assertEqual(len(payload), 4)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['event_name'], 'Present Test Event')
self.assertEqual(payload['team_key'], 'frc1')
self.assertIsNotNone(payload['match'])
| 41.913978
| 94
| 0.69882
| 459
| 3,898
| 5.753813
| 0.211329
| 0.102234
| 0.083302
| 0.070428
| 0.604317
| 0.537675
| 0.522529
| 0.491859
| 0.464218
| 0.464218
| 0
| 0.006283
| 0.183427
| 3,898
| 92
| 95
| 42.369565
| 0.823437
| 0.014366
| 0
| 0.388889
| 0
| 0.027778
| 0.09664
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.138889
| false
| 0
| 0.111111
| 0
| 0.263889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bcfde8681fdc58448a7018049cb36bbab73499b0
| 21,700
|
py
|
Python
|
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
|
martihmy/Compliant_control
|
485f627fa83d59f414f41bd57c5d37528ef5f1ec
|
[
"Apache-2.0"
] | null | null | null |
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
|
martihmy/Compliant_control
|
485f627fa83d59f414f41bd57c5d37528ef5f1ec
|
[
"Apache-2.0"
] | null | null | null |
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
|
martihmy/Compliant_control
|
485f627fa83d59f414f41bd57c5d37528ef5f1ec
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import copy
from copy import deepcopy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point
from visualization_msgs.msg import *
from franka_interface import ArmInterface
from panda_robot import PandaArm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation
np.set_printoptions(precision=2)
"""
This is a FORCE-BASED VARIABLE IMPEDANCE CONTROLLER based on [Huang1992: Compliant Motion Control of Robots by Using Variable Impedance]
To achieve force tracking, the apparent stiffness (K) and damping (B) is dynamically adjusted through functions dependent on the error in position, velocity and force
About the code/controller:
1] Only stiffness and damping in the 'z'-direction is adaptive, the rest are static
2] Due to the faulted joint velocities (read from rostopics), the more noisy,
numerically derived derivatives of the joint position are prefered to be
used in the controller { get_x_dot(..., numerically = True) }
3] You can now choose between perform_torque_Huang1992() and perform_torque_DeSchutter()
        - DeSchutter's control-law offers geometrically consistent stiffness and is more computationally expensive
    4] The default desired motion- and force-trajectories are now made in a time-consistent manner, so that the PUBLISH RATE can be altered without messing up the desired behaviour.
The number of iterations is calculated as a function of the controller's control-cycle, T: (max_num_it = duration(=15 s) / T)
"""
# --------- Constants -----------------------------
#print(robot.joint_ordered_angles()) #Read the robot's joint-angles
#new_start = {'panda_joint1': 1.938963389436404, 'panda_joint2': 0.6757504724282993, 'panda_joint3': -0.43399745125475564, 'panda_joint4': -2.0375275954865573, 'panda_joint5': -0.05233040021194351, 'panda_joint6': 3.133254153457202, 'panda_joint7': 1.283328743909796}
# Stiffness
Kp = 30
Kpz = 30 #initial value (adaptive)
Ko = 900
K = np.array([[Kp, 0, 0, 0, 0, 0],
[0, Kp, 0, 0, 0, 0],
[0, 0, Kpz, 0, 0, 0],
[0, 0, 0, Ko, 0, 0],
[0, 0, 0, 0, Ko, 0],
[0, 0, 0, 0, 0, Ko]])
# Damping
Bp = Kp/7
Bpz = Bp # #initial value (adaptive)
Bo = 50
B = np.array([[Bp, 0, 0, 0, 0, 0],
[0, Bp, 0, 0, 0, 0],
[0, 0, Bpz, 0, 0, 0],
[0, 0, 0, Bo, 0, 0],
[0, 0, 0, 0, Bo, 0],
[0, 0, 0, 0, 0, Bo]])
# Apparent inertia
Mp = 10
Mo = 10
M_diag = np.array([Mp,Mp,Mp,Mo,Mo,Mo])
M = np.diagflat(M_diag)
# Constant matrices appearing in equation (50) of [Huang1992]
K_v = np.identity(6)
P = np.identity(6)
gamma = np.identity(18)
#gamma_M = 12
gamma_B = 0.001 #2 # The damping's rate of adaptivity (high value = slow changes)
gamma_K = 0.0005 #1 # The stiffness' rate of adaptivity (high value = slow changes)
#gamma[2,2] = gamma_M
gamma[8,8] = gamma_B
gamma[14,14] = gamma_K
duration = 15 #seconds SHOULD NOT BE ALTERED
"""Functions for generating desired MOTION trajectories"""
#1 Generate a desired trajectory for the manipulator to follow
def generate_desired_trajectory(iterations,T):
a = np.zeros((6,iterations))
v = np.zeros((6,iterations))
p = np.zeros((3,iterations))
p[:,0] = get_p()
if iterations > 300:
a[2,0:100]=-0.00001/T**2
a[2,250:350]=0.00001/T**2
if iterations > 6500:
a[0,4500:4510]=0.00001/T**2
a[0,6490:6500]=-0.00001/T**2
    for i in range(iterations):  # use this trajectory's own length, not the global max_num_it
if i>0:
v[:,i]=v[:,i-1]+a[:,i-1]*T
p[:,i]=p[:,i-1]+v[:3,i-1]*T
return a,v,p
#2 Generate a desired trajectory for the manipulator to follow
def generate_desired_trajectory_express(iterations,T):
a = np.zeros((6,iterations))
v = np.zeros((6,iterations))
p = np.zeros((3,iterations))
p[:,0] = get_p()
if iterations > 175:
a[2,0:50]=-0.00002/T**2
a[2,125:175]=0.00002/T**2
if iterations > 3250:
a[0,2250:2255]=0.00002/T**2
a[0,3245:3250]=-0.00002/T**2
    for i in range(iterations):  # use this trajectory's own length, not the global max_num_it
if i>0:
v[:,i]=v[:,i-1]+a[:,i-1]*T
p[:,i]=p[:,i-1]+v[:3,i-1]*T
return a,v,p
#3 Generate a (time-consistent) desired motion trajectory
def generate_desired_trajectory_tc(iterations,T,move_in_x=False):
a = np.zeros((6,iterations))
v = np.zeros((6,iterations))
p = np.zeros((3,iterations))
p[:,0] = get_p()
a[2,0:int(iterations/75)]=-1.25
a[2,int(iterations*2/75):int(iterations/25)]= 1.25
if move_in_x:
a[0,int(iterations*3/5):int(iterations*451/750)]=1.25
a[0,int(iterations*649/750):int(iterations*13/15)]=-1.25
    for i in range(iterations):  # use this trajectory's own length, not the global max_num_it
if i>0:
v[:,i]=v[:,i-1]+a[:,i-1]*T
p[:,i]=p[:,i-1]+v[:3,i-1]*T
return a,v,p
"""Functions for generating desired FORCE trajectories"""
#1 Generate a desired force trajectory
def generate_F_d(max_num_it,T):
a = np.zeros((6,max_num_it))
v = np.zeros((6,max_num_it))
s = np.zeros((6,max_num_it))
a[2,0:100] = 0.0005/T**2
a[2,100:200] = - 0.0005/T**2
if max_num_it > 1100:
a[2,500:550] = 0.0002/T**2
if max_num_it >4001:
a[2,1500:1550]=-0.0002/T**2
it = 2000
while it <= 4000:
a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(it*T/4*2*np.pi+np.pi/2))/T**2
it+=1
a[2,4001]=0.0001/T**2
for i in range(max_num_it):
if i>0:
v[2,i]=v[2,i-1]+a[2,i-1]*T
s[2,i]=s[2,i-1]+v[2,i-1]*T
return s
#2 Generate an efficient desired force trajectory
def generate_F_d_express(max_num_it,T):
a = np.zeros((6,max_num_it))
v = np.zeros((6,max_num_it))
s = np.zeros((6,max_num_it))
a[2,0:50] = 0.0010/T**2
a[2,100:150] = - 0.0010/T**2
if max_num_it > 275:
a[2,250:275] = 0.0008/T**2
if max_num_it >2001:
a[2,750:775]=-0.0008/T**2
it = 1000
while it <= 2000:
a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(2*it*T/4*2*np.pi+np.pi/2))/T**2
it+=1
a[2,2001]=0.0001/T**2
for i in range(max_num_it):
if i>0:
v[2,i]=v[2,i-1]+a[2,i-1]*T
s[2,i]=s[2,i-1]+v[2,i-1]*T
return s
#3 Generate a (time-consistent) desired force trajectory
def generate_F_d_tc(max_num_it,T):
a = np.zeros((6,max_num_it))
v = np.zeros((6,max_num_it))
s = np.zeros((6,max_num_it))
a[2,0:int(max_num_it/75)] = 62.5
a[2,int(max_num_it/37.5):int(max_num_it/25)] = - 62.5
if max_num_it > 275:
a[2,int(max_num_it/15):int(max_num_it*11/150)] = 50
if max_num_it >2001:
a[2,int(max_num_it/5):int(max_num_it*31/150)]=-50
it = int(max_num_it*4/15)
while it <= int(max_num_it*8/15):
a[2,it]= (-9*(np.pi**2)*(T/4)**2*np.sin(2*it*T/4*2*np.pi+np.pi/2))/T**2
it+=1
a[2,int(max_num_it*8/15+1)]=6.25
for i in range(max_num_it):
if i>0:
v[2,i]=v[2,i-1]+a[2,i-1]*T
s[2,i]=s[2,i-1]+v[2,i-1]*T
return s
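# --- Hedged usage sketch (T is a hypothetical control period) ---
# max_num_it follows from the fixed 15 s duration, so halving T doubles the
# iteration count without changing the commanded force profile:
#
# T = 0.001                        # 1 kHz control cycle
# max_num_it = int(duration / T)   # 15000 iterations
# F_d = generate_F_d_tc(max_num_it, T)
# print(F_d.shape)                 # (6, 15000)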
# ------------ Helper functions --------------------------------
# Calculate the numerical derivative of a each row in a vector
def get_derivative_of_vector(history,iteration,T):
size = history.shape[0]
if iteration > 0:
return np.subtract(history[:,iteration],history[:,iteration-1])/T
else:
return np.zeros(size)
# Saturation-function
def ensure_limits(lower,upper,matrix):
for i in range(6):
if matrix[i,i] > upper:
matrix[i,i] = upper
elif matrix[i,i] < lower:
matrix[i,i] = lower
# Return the cartesian (task-space) inertia of the manipulator [alternatively the inverse of it]
def get_W(inv = False):
W = np.linalg.multi_dot([robot.jacobian(),np.linalg.inv(robot.joint_inertia_matrix()),robot.jacobian().T])
if inv == True:
return np.linalg.inv(W)
else:
return W
# Return the external forces (everything except for z-force is set to 0 due to offsets)
def get_F_ext(two_dim = False):
if two_dim == True:
return np.array([0,0,robot.endpoint_effort()['force'][2],0,0,0]).reshape([6,1])
else:
return np.array([0,0,robot.endpoint_effort()['force'][2],0,0,0])
# Return the position and (relative) orientation
def get_x(goal_ori):
pos_x = robot.endpoint_pose()['position']
rel_ori = quatdiff_in_euler_radians(goal_ori, np.asarray(robot.endpoint_pose()['orientation']))
return np.append(pos_x,rel_ori)
# Return the linear and angular velocities
# Numerically = True -> return the derivarive of the state-vector
# Numerically = False -> read values from rostopic (faulty in sim when interacting with the environment)
def get_x_dot(x_hist,i,T, numerically=False):
if numerically == True:
return get_derivative_of_vector(x_hist,i,T)
else:
return np.append(robot.endpoint_velocity()['linear'],robot.endpoint_velocity()['angular'])
# Return the error in position and orientation
def get_delta_x(goal_ori, p_d, two_dim = False):
delta_pos = p_d - robot.endpoint_pose()['position']
delta_ori = quatdiff_in_euler_radians(np.asarray(robot.endpoint_pose()['orientation']), goal_ori)
if two_dim == True:
return np.array([np.append(delta_pos,delta_ori)]).reshape([6,1])
else:
return np.append(delta_pos,delta_ori)
# Return the error in linear and angular velocities
def get_x_dot_delta(x_d_dot,x_dot, two_dim = True):
if two_dim == True:
return (x_d_dot - x_dot).reshape([6,1])
else:
return x_d_dot - x_dot
# Return the error in linear and angular acceleration
def get_x_ddot_delta(x_d_ddot,v_history,i,T):
a = get_derivative_of_vector(v_history,i,T)
return x_d_ddot-a
# Return the cartesian (task-space) position
def get_p(two_dim=False):
if two_dim == True:
return robot.endpoint_pose()['position'].reshape([3,1])
else:
return robot.endpoint_pose()['position']
# Compute the difference between two quaternions and return it as Euler angles in radians
def quatdiff_in_euler_radians(quat_curr, quat_des):
curr_mat = quaternion.as_rotation_matrix(quat_curr)
des_mat = quaternion.as_rotation_matrix(quat_des)
rel_mat = des_mat.T.dot(curr_mat)
rel_quat = quaternion.from_rotation_matrix(rel_mat)
vec = quaternion.as_float_array(rel_quat)[1:]
if rel_quat.w < 0.0:
vec = -vec
return -des_mat.dot(vec)
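# Illustrative check (comment-only sketch; uses the numpy-quaternion package
# imported as `quaternion` earlier in this script): identical orientations
# give zero orientation error.
#   q = quaternion.from_rotation_matrix(np.eye(3))
#   quatdiff_in_euler_radians(q, q)  # -> array([0., 0., 0.])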
# -------------- Main functions --------------------
# Get xi as it is described in equation (44) in [Huang1992]
def get_xi(goal_ori, p_d, x_dot, x_d_dot, x_d_ddot, v_history, i, T):
E = -get_delta_x(goal_ori, p_d)
E_dot = -get_x_dot_delta(x_d_dot,x_dot, two_dim = False)
E_ddot = -get_x_ddot_delta(x_d_ddot,v_history,i,T)
E_diag = np.diagflat(E)
E_dot_diag = np.diagflat(E_dot)
E_ddot_diag = np.diagflat(E_ddot)
return np.block([E_ddot_diag,E_dot_diag,E_diag])
# Calculate lambda_dot as in equation (50) in [Huang1992]
def get_lambda_dot(gamma,xi,K_v,P,F_d):
return np.linalg.multi_dot([-np.linalg.inv(gamma),xi.T,np.linalg.inv(K_v),P,get_F_ext(two_dim=True)-F_d.reshape([6,1])])
# Return the updated (adapted) Inertia, Damping and Stiffness matrices.
def update_MBK_hat(lam,M,B,K):
M_hat = M # + np.diagflat(lam[0:6]) M is chosen to be constant
B_hat = B + np.diagflat(lam[6:12])
K_hat = K + np.diagflat(lam[12:18])
#ensure_limits(1,5000,M_hat)
ensure_limits(1,5000,B_hat)
ensure_limits(1,5000,K_hat)
return M_hat, B_hat, K_hat
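# Illustrative usage (comment-only sketch; M, B and K below are hypothetical
# stand-ins for the matrices defined in the initialization section):
#   lam = np.zeros(18)
#   M = np.diag(np.full(6, 10.)); B = np.diag(np.full(6, 100.)); K = np.diag(np.full(6, 50.))
#   M_hat, B_hat, K_hat = update_MBK_hat(lam, M, B, K)  # equals M, B, K when lam is zero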
# Calculate and apply the torque as in equation (10) in [Huang1992]
def perform_torque_Huang1992(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, goal_ori):
a = np.linalg.multi_dot([robot.jacobian().T,get_W(inv=True),np.linalg.inv(M)])
b = np.array([np.dot(M,x_d_ddot)]).reshape([6,1]) + np.array([np.dot(B,get_x_dot_delta(x_d_dot,x_dot))]).reshape([6,1]) + np.array([np.dot(K,get_delta_x(goal_ori,p_d,two_dim = True))]).reshape([6,1])
c = robot.coriolis_comp().reshape([7,1])
d = (np.identity(6)-np.dot(get_W(inv=True),np.linalg.inv(M))).reshape([6,6])
total_torque = np.array([np.dot(a,b)]).reshape([7,1]) + c + np.array([np.linalg.multi_dot([robot.jacobian().T,d,get_F_ext()])]).reshape([7,1])
robot.set_joint_torques(dict(list(zip(robot.joint_names(),total_torque))))
"""
TESTING AREA (Functions needed to run an adaptive version of DeSchutter's impedance controller)
[with geometrically consistent stiffness]
"""
def skew(vector):
return np.array([[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]])
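# Illustrative check (comment-only sketch): skew(v) @ u equals the cross
# product np.cross(v, u) for any 3-vectors, e.g.
#   v, u = np.array([1., 2., 3.]), np.array([0.5, -1., 2.])
#   np.allclose(np.dot(skew(v), u), np.cross(v, u))  # -> True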
def from_three_to_six_dim(matrix):
return np.block([[matrix,np.zeros((3,3))],[np.zeros((3,3)),matrix]])
def get_K_Pt_dot(R_d,K_pt,R_e):
return np.array([0.5*np.linalg.multi_dot([R_d,K_pt,R_d.T])+0.5*np.linalg.multi_dot([R_e,K_pt,R_e.T])])
def get_K_Pt_ddot(p_d,R_d,K_pt):
return np.array([0.5*np.linalg.multi_dot([skew(p_d-robot.endpoint_pose()['position']),R_d,K_pt,R_d.T])])
def E_quat(quat_n,quat_e):
return np.dot(quat_n,np.identity(3))-skew(quat_e)
def get_K_Po_dot(quat_n,quat_e,R_e,K_po):
return np.array([2*np.linalg.multi_dot([E_quat(quat_n,quat_e).T,R_e,K_po,R_e.T])])
def get_h_delta(K_pt_dot,K_pt_ddot,p_delta,K_po_dot,quat_e):
f_delta_t = np.array([np.dot(K_pt_dot,p_delta)])
m_delta_t = np.array([np.dot(K_pt_ddot,p_delta)])
null = np.zeros((3,1))
m_delta_o = np.array([np.dot(K_po_dot,quat_e)])
return np.array([np.append(f_delta_t.T,m_delta_t.T)]).T + np.array([np.append(null.T,m_delta_o.T)]).T
def perform_torque_DeSchutter(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, Rot_d): # must include Rot_d
J = robot.jacobian()
Rot_e = robot.endpoint_pose()['orientation_R']
Rot_e_bigdim = from_three_to_six_dim(Rot_e)
Rot_e_dot = np.dot(skew(robot.endpoint_velocity()['angular']),Rot_e) # R_dot = S(omega)*R, the time derivative of a rotation matrix (original note: not 100 % sure about this one)
Rot_e_dot_bigdim = from_three_to_six_dim(Rot_e_dot)
quat = quaternion.from_rotation_matrix(np.dot(Rot_e.T,Rot_d)) #orientational displacement represented as a unit quaternion
#quat = robot.endpoint_pose()['orientation']
quat_e_e = np.array([quat.x,quat.y,quat.z]) # vector part of the unit quaternion in the frame of the end effector
quat_e = np.dot(Rot_e.T,quat_e_e) # ... in the base frame
quat_n = quat.w
p_delta = p_d-robot.endpoint_pose()['position']
K_Pt_dot = get_K_Pt_dot(Rot_d,K[:3,:3],Rot_e)
K_Pt_ddot = get_K_Pt_ddot(p_d,Rot_d,K[:3,:3])
K_Po_dot = get_K_Po_dot(quat_n,quat_e,Rot_e,K[3:,3:])
h_delta_e = np.array(np.dot(Rot_e_bigdim,get_h_delta(K_Pt_dot,K_Pt_ddot,p_delta,K_Po_dot,quat_e))).reshape([6,1])
h_e = get_F_ext(two_dim=True)
h_e_e = np.array(np.dot(Rot_e_bigdim,h_e))
a_d_e = np.dot(Rot_e_bigdim,x_d_ddot).reshape([6,1])
v_d_e = np.dot(Rot_e_bigdim,x_d_dot).reshape([6,1])
alpha_e = a_d_e + np.dot(np.linalg.inv(M),(np.dot(B,v_d_e.reshape([6,1])-np.dot(Rot_e_bigdim,x_dot).reshape([6,1]))+h_delta_e-h_e_e)).reshape([6,1])
alpha = np.dot(Rot_e_bigdim.T,alpha_e).reshape([6,1])+np.dot(Rot_e_dot_bigdim.T,np.dot(Rot_e_bigdim,x_dot)).reshape([6,1])
torque = np.linalg.multi_dot([J.T,get_W(inv=True),alpha]).reshape((7,1)) + np.array(robot.coriolis_comp().reshape((7,1))) + np.dot(J.T,h_e).reshape((7,1))
robot.set_joint_torques(dict(list(zip(robot.joint_names(),torque))))
"""
TESTING AREA
"""
# -------------- Plotting ------------------------
def plot_result(v_num, v,p,p_d, delta_x, F_ext,F_d, z_dynamics,M,B,K, T):
time_array = np.arange(len(p[0]))*T
plt.subplot(211)
plt.title("External force")
plt.plot(time_array, F_ext[2], label="force z [N]")
plt.plot(time_array, F_d[2], label="desired force z [N]", color='b',linestyle='dashed')
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(212)
plt.title("Position")
plt.plot(time_array, p[0,:], label = "true x [m]")
plt.plot(time_array, p[1,:], label = "true y [m]")
plt.plot(time_array, p[2,:], label = "true z [m]")
plt.plot(time_array, p_d[0,:], label = "desired x [m]", color='b',linestyle='dashed')
plt.plot(time_array, p_d[1,:], label = "desired y [m]", color='C1',linestyle='dashed')
plt.plot(time_array, p_d[2,:], label = "desired z [m]", color='g',linestyle='dashed')
plt.xlabel("Real time [s]")
plt.legend()
"""
plt.subplot(233)
plt.title("Orientation error in Euler")
plt.plot(time_array, delta_x[3]*(180/np.pi), label = "error Ori_x [degrees]")
plt.plot(time_array, delta_x[4]*(180/np.pi), label = "error Ori_y [degrees]")
plt.plot(time_array, delta_x[5]*(180/np.pi), label = "error Ori_z [degrees]")
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(234)
plt.title("Adaptive dynamics along the z-axis")
plt.plot(time_array, z_dynamics[0], label = "inertia (M_z)")
plt.plot(time_array, z_dynamics[1], label = "damping (B_z)")
plt.plot(time_array, z_dynamics[2], label = "stiffness (K_z)")
plt.axhline(y=M[2][2], label = "initial inertia (M_z)", color='b',linestyle='dashed')
plt.axhline(y=B[2][2], label = "initial damping (B_z)", color='C1',linestyle='dashed')
plt.axhline(y=K[2][2], label = "initial stiffness (K_z)", color='g',linestyle='dashed')
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(235)
plt.title("velocity read from rostopic")
plt.plot(time_array, v[0], label = "vel x")
plt.plot(time_array, v[1], label = "vel y")
plt.plot(time_array, v[2], label = "vel z")
plt.plot(time_array, v[3], label = "ang x")
plt.plot(time_array, v[4], label = "ang y")
plt.plot(time_array, v[5], label = "ang z")
plt.xlabel("Real time [s]")
plt.legend()
plt.subplot(236)
plt.title("numerically calculated velocity")
plt.plot(time_array, v_num[0], label = "vel x")
plt.plot(time_array, v_num[1], label = "vel y")
plt.plot(time_array, v_num[2], label = "vel z")
plt.plot(time_array, v_num[3], label = "ang x")
plt.plot(time_array, v_num[4], label = "ang y")
plt.plot(time_array, v_num[5], label = "ang z")
plt.xlabel("Real time [s]")
plt.legend()
"""
plt.show()
if __name__ == "__main__":
# ---------- Initialization -------------------
rospy.init_node("impedance_control")
robot = PandaArm()
publish_rate = 250
rate = rospy.Rate(publish_rate)
T = 0.001*(1000/publish_rate)
max_num_it = int(duration /T)
#robot.move_to_joint_positions(new_start)
robot.move_to_neutral()
# Arrays containing data needed for calculation of the torque output
lam = np.zeros(18)
v_history = np.zeros((6,max_num_it))
# Arrays providing data for plotting
p_history = np.zeros((3,max_num_it))
v_history_num = np.zeros((6,max_num_it))
x_history = np.zeros((6,max_num_it))
delta_x_history = np.zeros((6,max_num_it))
F_ext_history = np.zeros((6,max_num_it))
z_dynamics_history = np.zeros((3,max_num_it))
# Specify the desired behaviour of the robot
x_d_ddot, x_d_dot, p_d = generate_desired_trajectory_tc(max_num_it,T,move_in_x = True)
goal_ori = np.asarray(robot.endpoint_pose()['orientation']) # goal orientation = current (initial) orientation [remains the same the entire duration of the run]
Rot_d = robot.endpoint_pose()['orientation_R'] # used by the DeSchutter implementation
F_d = generate_F_d_tc(max_num_it,T)
# ----------- The control loop -----------
for i in range(max_num_it):
# update state-lists
p_history[:,i] = get_p()
x_history[:,i] = get_x(goal_ori)
delta_x_history[:,i] = get_delta_x(goal_ori,p_d[:,i])
F_ext_history[:,i] = get_F_ext()
x_dot = get_x_dot(x_history,i,T, numerically=False) # choose 'numerically' as either True or False
v_history_num[:,i] = get_x_dot(x_history,i,T, numerically=True) # only for plotting
v_history[:,i] = get_x_dot(x_history,i,T) # for calculating error in acceleration
# adapt M,B and K
xi = get_xi(goal_ori, p_d[:,i],x_dot, x_d_dot[:,i], x_d_ddot[:,i], v_history, i, T)
lam = lam.reshape([18,1]) + get_lambda_dot(gamma,xi,K_v,P,F_d[:,i]).reshape([18,1])*T
M_hat,B_hat,K_hat = update_MBK_hat(lam,M,B,K)
# Apply the resulting torque to the robot
"""CHOOSE ONE OF THE TWO CONTROLLERS BELOW"""
perform_torque_Huang1992(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], goal_ori)
#perform_torque_DeSchutter(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], Rot_d)
rate.sleep()
# plotting and printing
z_dynamics_history[0][i]=M_hat[2][2]
z_dynamics_history[1][i]=B_hat[2][2]
z_dynamics_history[2][i]=K_hat[2][2]
# Live printing to screen when the controller is running
if i%100 == 0:
print(i,'/',max_num_it,' (= ',T*i,' [s]) Force in z: ',F_ext_history[2,i])
print(K_hat[2][2])
print('')
#Uncomment the block below to save plotting-data
"""
np.save('VIC_p_d.npy',p_d)
np.save('VIC_p.npy',p_history)
np.save('VIC_Fz_d.npy',F_d)
np.save('VIC_Fz.npy',F_ext_history[2])
np.save('VIC_delta_x.npy',delta_x_history) # position and orientation error (orientation in radians)
np.save('VIC_adaptive_gains.npy',z_dynamics_history)
"""
plot_result(v_history_num,v_history, p_history, p_d, delta_x_history, F_ext_history, F_d, z_dynamics_history,M,B,K, T)
4c016da0c81742ef879e9615198cd22dc666a5c6 | 6,998 bytes | Python | pycmap/common.py | mdashkezari/pycmap @ 5b526404d005ec220ab0911cd2f3c05263f9eda3 | ["MIT"] | stars: 4, issues: 2, forks: 1
"""
Author: Mohammad Dehghani Ashkezari <mdehghan@uw.edu>
Date: 2019-06-28
Function: Host a collection of shared multi-purpose helper functions.
"""
import os
import sys
from tqdm import tqdm
from colorama import Fore, Back, Style, init
import numpy as np
import pandas as pd
import webbrowser
import IPython
MAX_ROWS = 2000000
MAX_SAMPLE_SOURCE = 500000
def halt(msg):
"""Prints an error message and terminates the program."""
msg = '\n' + msg
init(convert=True)
print(Fore.RED + msg, file=sys.stderr)
print(Style.RESET_ALL, end='')
sys.exit(1)
return
def print_tqdm(msg, err=False):
"""Print helper function compatible with tqdmm progressbar."""
# init()
msg = '\n' + msg
if err:
tqdm.write(Fore.RED + msg)
else:
tqdm.write(msg)
tqdm.write(Style.RESET_ALL, end='')
return
def get_base_url():
"""Returns API root endpoint."""
return os.environ.get(
'CMAP_API_BASE_URL', 'https://simonscmap.com').rstrip('/')
def jupytered():
"""Returns True if jupyter notebook has invoked the package."""
jup = False
import __main__ as main
if not hasattr(main, '__file__'):
jup = True
return jup
def inline():
"""
Checks if the package results should get prepared for an "inline" context.
Currently, just calls the jupytered function.
"""
return jupytered()
def make_filename_by_table_var(table, variable, prefix=''):
"""Generate a filename (without extention) using table and variable names."""
if prefix != '': prefix += '_'
return prefix + variable + '_' + table
def canvas_rect(dw, dh):
"""Resizes a canvas dimensions so that it better fits on client browser."""
ar = dw / dh
h = 400 if ar > 3 else 500
w_min = 300
w_max = 1000
w = int(ar * h)
if w > w_max: w = w_max
if w < w_min: w = w_min
return w, h
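# Illustrative examples (comment-only sketch):
#   canvas_rect(1200, 300)  # -> (1000, 400): wide aspect ratio, width capped at w_max
#   canvas_rect(100, 500)   # -> (300, 500):  narrow aspect ratio, width floored at w_min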
def get_data_limits(data, quant=0.05):
"""Returns low and high quantile limits of a numeric array."""
data = np.array(data).flatten()
return np.nanquantile(data, quant), np.nanquantile(data, 1-quant)
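# Illustrative example (comment-only sketch):
#   get_data_limits(np.arange(100.))  # -> (4.95, 94.05) with the default quant=0.05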
# def get_token(token=None):
# token = token or os.environ.get('CMAP_API_KEY')
# if token in [None, '']:
# halt('API Key must be specified to access CMAP API')
# return token
def config_path():
"""Returns the path to the config spreadsheet file."""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.csv')
def initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir):
"""Creates a .csv file hosting the primary project configs """
if vizEngine is None: vizEngine = 'plotly'
if exportDir is None: exportDir = './export/'
if exportFormat is None: exportFormat = '.csv'
if figureDir is None: figureDir = './figure/'
config = {
'token': [token],
'vizEngine': [vizEngine],
'exportDir': [exportDir],
'exportFormat': [exportFormat],
'figureDir': [figureDir]
}
pd.DataFrame(config).to_csv(config_path(), index=False)
return
def remove_angle_brackets(token):
"""Removes angle brackets at start and end of the token, if exist."""
if token is not None:
if token[0] == '<': token = token[1:]
if token[-1] == '>': token = token[:-1]
return token
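# Illustrative examples (comment-only sketch):
#   remove_angle_brackets('<my-api-key>')  # -> 'my-api-key'
#   remove_angle_brackets('my-api-key')    # -> 'my-api-key' (unchanged)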
def save_config(token=None, vizEngine=None, exportDir=None, exportFormat=None, figureDir=None):
"""Updates the project's configs at the config spreadsheet."""
configPath = config_path()
if not os.path.isfile(configPath):
initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir)
df = pd.read_csv(configPath)
if token is not None:
df['token'] = remove_angle_brackets(token)
if vizEngine is not None:
supportedVizEngines = ['bokeh', 'plotly']
if vizEngine not in supportedVizEngines:
halt('%s is not a supported visualization library' % vizEngine)
df['vizEngine'] = vizEngine
if exportDir is not None:
df['exportDir'] = exportDir
if exportFormat is not None:
df['exportFormat'] = exportFormat
if figureDir is not None:
df['figureDir'] = figureDir
df.to_csv(configPath, index=False)
return
def load_config():
"""Loads the config spreadsheet and returns it as a dataframe."""
configPath = config_path()
if not os.path.isfile(configPath):
msg = '\nAPI key not found!\n'
msg = msg + 'Please pass the API key using the following code:\n'
msg = msg + 'import pycmap\n'
msg = msg + 'pycmap.API(<api_key>)\n'
halt(msg)
return pd.read_csv(configPath)
def get_token():
"""Returns the API key."""
return remove_angle_brackets(load_config()['token'][0])
def get_vizEngine():
"""Returns the visualization library name."""
return load_config()['vizEngine'][0]
def get_export_dir():
"""Returns the path to the export directory."""
return load_config()['exportDir'][0]
def get_export_format():
"""Returns the file format of the exported files."""
return load_config()['exportFormat'][0]
def get_figure_dir():
"""Returns the path to the figure directory."""
return load_config()['figureDir'][0]
def get_bokeh_tools():
"""Returns a list tools used along with a bokeh graph."""
return 'crosshair,pan,zoom_in,wheel_zoom,zoom_out,box_zoom,reset,save'
def normalize(vals, min_max=False):
"""Takes an array and either normalize to min/max, standardize it (remove the mean and divide by standard deviation)."""
if min_max:
normalized_vals=(vals-np.nanmin(vals))/(np.nanmax(vals)-np.nanmin(vals))
else:
normalized_vals=(vals-np.nanmean(vals))/np.nanstd(vals)
return normalized_vals
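# Illustrative examples (comment-only sketch):
#   normalize(np.array([0., 5., 10.]), min_max=True)  # -> array([0. , 0.5, 1. ])
#   normalize(np.array([0., 5., 10.]))                # zero mean, unit (population) std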
def open_HTML(path):
"""Display HTML file by defaut browser or inline in case jupyter is the caller."""
if jupytered():
vObj = IPython.display.IFrame(path, width=800, height=400)
IPython.display.display(vObj)
else:
path = 'file://' + os.path.realpath(path)
webbrowser.open(path, new=2)
return
4c03d0743c0121e9d0de50ceaa47b8661683af6f | 2,207 bytes | Python | tests/test_device.py | michaelwoods/home-assistant-cli @ 340643af943f36283621f39ac39a690b1fccc045 | ["Apache-2.0"] | stars: null, issues: null, forks: null
"""Testing Device operations."""
import json
import unittest.mock as mock
from click.testing import CliRunner
import homeassistant_cli.cli as cli
def test_device_list(default_devices) -> None:
"""Test Device List."""
with mock.patch(
'homeassistant_cli.remote.get_devices', return_value=default_devices
):
runner = CliRunner()
result = runner.invoke(
cli.cli,
["--output=json", "device", "list"],
catch_exceptions=False,
)
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data) == 23
def test_device_list_filter(default_devices) -> None:
"""Test Device List."""
with mock.patch(
'homeassistant_cli.remote.get_devices', return_value=default_devices
):
runner = CliRunner()
result = runner.invoke(
cli.cli,
["--output=json", "device", "list", "table"],
catch_exceptions=False,
)
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data) == 2
assert data[0]['name'] == "Kitchen table left"
assert data[1]['name'] == "Kitchen table right"
def test_device_assign(default_areas, default_devices) -> None:
"""Test basic device assign."""
with mock.patch(
'homeassistant_cli.remote.get_devices', return_value=default_devices
):
with mock.patch(
'homeassistant_cli.remote.get_areas', return_value=default_areas
):
with mock.patch(
'homeassistant_cli.remote.assign_area',
return_value={'success': True},
):
runner = CliRunner()
result = runner.invoke(
cli.cli,
["device", "assign", "Kitchen", "Kitchen table left"],
catch_exceptions=False,
)
print(result.output)
assert result.exit_code == 0
expected = (
"Successfully assigned 'Kitchen'"
" to 'Kitchen table left'\n"
)
assert result.output == expected
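# Illustrative conftest sketch (hypothetical; the real default_devices and
# default_areas fixtures live in this repo's conftest.py and load recorded
# fixture data, e.g. 23 devices of which two are named "Kitchen table left"
# and "Kitchen table right"):
#   @pytest.fixture
#   def default_devices():
#       with open('tests/fixtures/devices.json') as f:
#           return json.load(f)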
4c03f5083b7da646254c6bd784cf88ab749969d1 | 4,350 bytes | Python | widgets/tree_item.py | tarsa129/j3d-animation-editor @ 3f0691bd7dcece6e2055a0b5af0510608f28f2ca | ["MIT"] | stars: 6, issues: 3, forks: 2
from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
import animations.general_animation as j3d
from widgets.yaz0 import compress, compress_slow, compress_fast
from io import BytesIO
class tree_item(QTreeWidgetItem):
def __init__(self, parent):
QTreeWidgetItem.__init__(self, parent,1000)
self.display_info = []
self.filepath = ""
self.compressed = 1
self.bmd_file = None
self.sound_data = None
self.changed = False
def set_values(self, display_info, filepath, compressed ):
self.display_info = display_info
self.filepath = filepath.replace("|", ".")
self.compressed = compressed
forward_i = filepath.rfind("/") + 1
backward_i = filepath.rfind("\\") + 1
self.setText(0, self.filepath[max(forward_i, backward_i):])
def set_sound(self, sound_data):
self.sound_data = sound_data
if sound_data is not None:
icon = QIcon("icons/sound.png")
self.setIcon(0, icon)
else:
self.setIcon(0, QIcon() )
def save_animation(self, other_filepath = "", compress_dis = 1, save_all = False):
if save_all and not self.changed:
print("skipping " + self.filepath + " because nothing has changed")
return
if other_filepath != "":
working_filepath = other_filepath
else:
working_filepath = self.filepath
if (working_filepath.endswith("a") and not working_filepath.endswith(".bva") ):
info = j3d.fix_array( self.display_info)
self.convert_to_a(info)
else:
info = j3d.fix_array( self.display_info)
j3d.sort_filepath(working_filepath, info, self.sound_data)
compress_status = self.compressed
if compress_dis != 0:
compress_status = compress_dis
print(compress_status)
if compress_status > 1:
out = BytesIO()
with open(working_filepath, "rb") as f:
if compress_status == 2:
out = compress_fast(f)
elif compress_status == 3:
out = compress(f)
elif compress_status == 4:
out = compress_slow(f)
with open(working_filepath, "wb") as f:
f.write(out.getbuffer())
self.changed = False
def convert_to_k(self):
filepath = self.filepath[:-1] + "k"
info = j3d.fix_array(self.display_info)
if self.filepath.endswith(".bca"):
bck = j3d.sort_filepath(filepath, info)
elif filepath.endswith(".bla"):
blk = j3d.sort_filepath(filepath, info)
def convert_to_a(self, info):
info = j3d.fix_array( info )
if self.filepath.endswith(".bck") or self.filepath.endswith(".bca"):
bca = j3d.convert_to_a(self.filepath, info) #this is a pure bck, no saving
filepath = self.filepath[:-1] + "a"
with open(filepath, "wb") as f:
bca.write_bca(f)
f.close()
elif self.filepath.endswith(".blk") or self.filepath.endswith(".bla"):
bla = j3d.convert_to_a(self.filepath, info) #this is a pure bla, no saving
filepath = self.filepath[:-1] + "a"
with open(filepath, "wb") as f:
bla.write_bla(f)
f.close()
def export_anim(self):
info = j3d.fix_array(self.display_info)
filepath = self.filepath[0:-4] + ".anim"
if self.bmd_file is None:
bmd_file, choosentype = QFileDialog.getOpenFileName( None, "Open File","" , "Model files (*.bmd *.bdl)")
if bmd_file:
bck = j3d.export_anim(filepath, info, bmd_file)
else:
bck = j3d.export_anim(filepath, info, self.bmd_file)
def add_children(self, strings):
self.takeChildren()
for name in strings:
child = QTreeWidgetItem(self)
child.setText(0, name)
child.setDisabled(True)
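# Illustrative usage (comment-only sketch; requires a running QApplication and
# a QTreeWidget parent; names below are hypothetical):
#   item = tree_item(tree_widget)
#   item.set_values(display_info, "anims/walk.bck", compressed=1)
#   item.save_animation(compress_dis=2)  # 2 -> compress_fast, 3 -> compress, 4 -> compress_slow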
4c040273405e24f9a3249bb42b05984c6988f41a | 3,445 bytes | Python | Wheels.py | edhosken/WheelsSong @ cb988c8510a1095eeec3a2399b0fc0ba24bfa648 | ["MIT"] | stars: null, issues: null, forks: null
#Create the pre-defined song values and empty variables... Correct names are not used so that each starting letter is unique
numbers = (1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 )
letters = ['a ','b ','c ','d ','e ','f ','g ','h ','i ','j ','k ','l ','m ','n ','o ','p ','q ','r ']
roman = ['I ', 'II ', 'III ', 'IV ', 'V ', 'VI ', 'VII ', 'VIII ', 'IX ', 'X ', 'XI ', 'XII ', 'XIII ', 'XIV ', 'XV ', 'XVI ', 'XVII ', 'XVIII']
military = ['alpha ', 'bravo ', 'charlie ', 'delta ', 'echo ', 'foxtrot ', 'golf ', 'hotel ', 'india ', 'juliet ', 'kilo ', 'lima ', 'mike ', 'november ', 'oscar ', 'papa ', 'quebec ', 'romeo ']
german = ['eins', 'zwei', 'drei', 'vier', 'fünf', 'sechs', 'sieben', 'acht', 'neun', 'zehn', 'elf', 'zwölf', 'dreizehn', 'vierzehn', 'fünfzehn', 'sechzehn', 'siebzehn', 'achtzehn']
pi = ['3 ','point ','1 ','4 ','1 ','5 ','9 ','2 ','6 ','5 ','3 ','5 ','8 ','9 ','7 ','9 ','3 ','2 ']
##Build morse code sequences
t = 'dot'
s = 'dash'
m1 = t, s, s, s, s
m2 = t, t, s, s, s
m3 = t, t, t, s, s
m4 = t, t, t, t, s
m5 = t, t, t, t, t
m6 = s, t, t, t, t
m7 = s, s, t, t, t
m8 = s, s, s, t, t
m9 = s, s, s, s, t
m0 = s, s, s, s, s
code = [m1, m2, m3, m4, m5, m6, m7, m8, m9, m1 + m0, m1 + m1, m1 + m2, m1 + m3, m1 + m4, m1 + m5, m1 + m6, m1 + m7, m1 + m8]
##Other ideas: piglatin, japanese, spanish, prime, tau, e, ...
##NEED TO ADD INVALID ENTRY CATCHES
print("Hello, let's sing a song that everybody loves!\n")
sing = 'y'
while sing == 'y':
user = []
variation = input ("Please input what variation you wish to perform by entering 'numbers', 'letters', 'roman', 'military', 'pi', 'german', 'code', or 'user' to make your own song: \n").lower().strip()
##Seemingly silly switching of strings to list types
if variation == "numbers" or variation == "n":
variation = numbers
elif variation == "letters" or variation == "l":
variation = letters
elif variation == "roman" or variation == "r":
variation = roman
elif variation == "military" or variation == "m":
variation = military
elif variation == "pi" or variation == "p":
variation = pi
elif variation == "german" or variation == "g":
variation = german
elif variation == "code" or variation == "c":
variation = code
elif variation == "user" or variation == "u":
while len(user) < 18:
user.append(input ("Enter a word: "))
#User input to select the song pattern
pattern = input ("\nNow please tell me what pattern to use by entering 'forward', 'backward', 'even', or 'odd':\n")
print ("\nHere we go: \n\n")
#Assemble the song... IMPROVE FORMAT SO OUTPUT IS EASIER TO READ
song1 = "Oh, there are "
song2 = " wheels on a big rig truck!"
a = song1, variation[::], song2
b = song1, variation[::-1], song2
c = song1, variation[::2], song2
d = song1, variation[1::2], song2
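#Illustrative slice behaviour (comment-only): with the numbers list,
#variation[::2] picks indices 0,2,4,... (the odd values 1,3,5,...),
#variation[1::2] picks the even values 2,4,6,..., and [::-1] simply reverses.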
##Use pattern.startswith()?... Also, might be better to separate forward/backward and even/odd choices.
if pattern == 'forward' or pattern == 'f':
print (a)
elif pattern == 'backward' or pattern == 'b':
print (b)
elif pattern == 'odd' or pattern == 'o':
print (c)
elif pattern == 'even' or pattern == 'e':
print (d)
sing = input('\n\nWould you like to sing it again? (y/n) ').lower()
## This is the end of the while loop
else:
print ("\nOK, Goodbye!")
4c0417fc2a324560f940489498afd9c4d64ac7c7 | 15,792 bytes | Python | tests/test_config.py | dfroger/conda @ c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | ["BSD-3-Clause"] | stars: null, issues: null, forks: null
# (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
import os
from os.path import dirname, join, exists
import unittest
import pytest
import conda.config as config
from conda.utils import get_yaml
from conda.compat import iterkeys
from tests.helpers import run_conda_command
yaml = get_yaml()
# use condarc from source tree to run these tests against
config.rc_path = join(dirname(__file__), 'condarc')
def _get_default_urls():
return ['http://repo.continuum.io/pkgs/free',
'http://repo.continuum.io/pkgs/pro']
config.get_default_urls = _get_default_urls
# unset CIO_TEST. This is a Continuum-internal variable that draws packages from an internal server instead of
# repo.continuum.io
try:
del os.environ['CIO_TEST']
except KeyError:
pass
class TestConfig(unittest.TestCase):
# These tests are mostly to ensure API stability
def __init__(self, *args, **kwargs):
config.rc = config.load_condarc(config.rc_path)
# Otherwise normalization tests will fail if the user is logged into
# binstar.
config.rc['add_binstar_token'] = False
super(TestConfig, self).__init__(*args, **kwargs)
def test_globals(self):
self.assertTrue(config.root_dir)
self.assertTrue(config.pkgs_dirs)
self.assertTrue(config.envs_dirs)
self.assertTrue(config.default_prefix)
self.assertTrue(config.platform)
self.assertTrue(config.subdir)
self.assertTrue(config.arch_name)
self.assertTrue(config.bits in (32, 64))
def test_pkgs_dir_from_envs_dir(self):
root_dir = config.root_dir
root_pkgs = join(root_dir, 'pkgs')
for pi, po in [
(join(root_dir, 'envs'), root_pkgs),
('/usr/local/foo/envs' if config.platform != 'win' else 'C:\envs',
'/usr/local/foo/envs/.pkgs' if config.platform != 'win' else 'C:\envs\.pkgs'),
]:
self.assertEqual(config.pkgs_dir_from_envs_dir(pi), po)
def test_proxy_settings(self):
self.assertEqual(config.get_proxy_servers(),
{'http': 'http://user:pass@corp.com:8080',
'https': 'https://user:pass@corp.com:8080'})
def test_normalize_urls(self):
current_platform = config.subdir
assert config.DEFAULT_CHANNEL_ALIAS == 'https://conda.anaconda.org/'
assert config.rc.get('channel_alias') == 'https://your.repo/'
for channel in iterkeys(config.normalize_urls(['defaults', 'system',
'https://anaconda.org/username', 'file:///Users/username/repo',
'username'])):
assert (channel.endswith('/%s/' % current_platform) or
channel.endswith('/noarch/'))
self.assertEqual(config.normalize_urls([
'defaults', 'system', 'https://conda.anaconda.org/username',
'file:///Users/username/repo', 'username'
], 'osx-64'),
{'file:///Users/username/repo/noarch/': ('file:///Users/username/repo', 6),
'file:///Users/username/repo/osx-64/': ('file:///Users/username/repo', 6),
'http://repo.continuum.io/pkgs/free/noarch/': (None, 1),
'http://repo.continuum.io/pkgs/free/osx-64/': (None, 1),
'http://repo.continuum.io/pkgs/pro/noarch/': (None, 1),
'http://repo.continuum.io/pkgs/pro/osx-64/': (None, 1),
'http://some.custom/channel/noarch/': ('http://some.custom/channel', 3),
'http://some.custom/channel/osx-64/': ('http://some.custom/channel', 3),
'https://conda.anaconda.org/username/noarch/': ('https://conda.anaconda.org/username', 5),
'https://conda.anaconda.org/username/osx-64/': ('https://conda.anaconda.org/username', 5),
'https://your.repo/binstar_username/noarch/': ('binstar_username', 2),
'https://your.repo/binstar_username/osx-64/': ('binstar_username', 2),
'https://your.repo/username/noarch/': ('username', 7),
'https://your.repo/username/osx-64/': ('username', 7)})
test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc')
def _read_test_condarc():
with open(test_condarc) as f:
return f.read()
# Tests for the conda config command
# FIXME This should be multiple individual tests
@pytest.mark.slow
def test_config_command_basics():
try:
# Test that creating the file adds the defaults channel
assert not os.path.exists('test_condarc')
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
os.unlink(test_condarc)
# When defaults is explicitly given, it should not be added
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test', '--add', 'channels', 'defaults')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- defaults
- test
"""
os.unlink(test_condarc)
# Duplicate keys should not be added twice
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == stderr == ''
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == ''
assert stderr == "Skipping channels: test, item already exists"
assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
os.unlink(test_condarc)
# Test creating a new file with --set
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'always_yes', 'true')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
always_yes: true
"""
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_get():
try:
# Test --get
with open(test_condarc, 'w') as f:
f.write("""\
channels:
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: no
always_yes: true
invalid_key: yes
channel_alias: http://alpha.conda.anaconda.org
""")
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
assert stdout == """\
--set always_yes True
--set changeps1 no
--set channel_alias http://alpha.conda.anaconda.org
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'\
"""
assert stderr == "unknown key invalid_key"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'channels')
assert stdout == """\
--add channels 'defaults'
--add channels 'test'\
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'changeps1')
assert stdout == """\
--set changeps1 no\
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'changeps1', 'channels')
assert stdout == """\
--set changeps1 no
--add channels 'defaults'
--add channels 'test'\
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'allow_softlinks')
assert stdout == ""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'track_features')
assert stdout == ""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'invalid_key')
assert stdout == ""
assert "invalid choice: 'invalid_key'" in stderr
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'not_valid_key')
assert stdout == ""
assert "invalid choice: 'not_valid_key'" in stderr
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_parser():
try:
# Now test the YAML "parser"
# Channels is normal content.
# create_default_packages has extra spaces in list items
condarc = """\
channels:
- test
- defaults
create_default_packages :
- ipython
- numpy
changeps1: false
# Here is a comment
always_yes: yes
"""
# First verify that this itself is valid YAML
assert yaml.load(condarc, Loader=yaml.RoundTripLoader) == {'channels': ['test', 'defaults'],
'create_default_packages': ['ipython', 'numpy'], 'changeps1':
False, 'always_yes': 'yes'}
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
assert stdout == """\
--set always_yes yes
--set changeps1 False
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'\
"""
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'mychannel')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- mychannel
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: false
# Here is a comment
always_yes: 'yes'
"""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'changeps1', 'true')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- mychannel
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: true
# Here is a comment
always_yes: 'yes'
"""
os.unlink(test_condarc)
# Test adding a new list key. We couldn't test this above because it
# doesn't work yet with odd whitespace
condarc = """\
channels:
- test
- defaults
always_yes: true
"""
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'disallow', 'perl')
assert stdout == stderr == ''
assert _read_test_condarc() == condarc + """\
disallow:
- perl
"""
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_remove_force():
try:
# Finally, test --remove, --remove-key
run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
run_conda_command('config', '--file', test_condarc, '--set',
'always_yes', 'true')
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'channels', 'test')
assert stdout == stderr == ''
assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults'],
'always_yes': True}
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'channels', 'test', '--force')
assert stdout == ''
assert stderr == "Error: 'test' is not in the 'channels' key of the config file"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'disallow', 'python', '--force')
assert stdout == ''
assert stderr == "Error: key 'disallow' is not in the config file"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove-key', 'always_yes', '--force')
assert stdout == stderr == ''
assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults']}
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove-key', 'always_yes', '--force')
assert stdout == ''
assert stderr == "Error: key 'always_yes' is not in the config file"
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_bad_args():
try:
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'notarealkey', 'test')
assert stdout == ''
assert not exists(test_condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--set',
'notarealkey', 'yes')
assert stdout == ''
assert not exists(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
def test_invalid_rc():
# Some tests for unexpected input in the condarc, like keys that are the
# wrong type
try:
condarc = """\
channels:
"""
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--add', 'channels', 'test')
assert stdout == ''
assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the
yaml parser (this will remove any structure or comments from the existing
.condarc file). Reason: key 'channels' should be a list, not NoneType."""
assert _read_test_condarc() == condarc
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
def test_config_set():
# Test the config set command
# Make sure it accepts only boolean values for boolean keys and any value for string keys
try:
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'always_yes', 'yep')
assert stdout == ''
assert stderr == 'Error: Key: always_yes; yep is not a YAML boolean.'
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
def test_set_rc_string():
# Test setting string keys in .condarc
# We specifically test ssl_verify since it can be either a boolean or a string
try:
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'ssl_verify', 'yes')
assert stdout == ''
assert stderr == ''
verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify']
assert verify == 'yes'
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'ssl_verify', 'test_string.crt')
assert stdout == ''
assert stderr == ''
verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify']
assert verify == 'test_string.crt'
os.unlink(test_condarc)
finally:
try:
os.unlink(test_condarc)
except OSError:
pass
4c045d0953c279b203d260f5d6f3f9a0b7bdf019 | 3,579 bytes | Python | malaya/transformers/babble.py | ahmed3991/malaya @ d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c | ["MIT"] | stars: 1, issues: null, forks: null
# Bert has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model,
# by Alex Wang, Kyunghyun Cho, NeuralGen 2019
# https://colab.research.google.com/drive/1MxKZGtQ9SSBjTK5ArsZ5LKhkztzg52RV
# https://arxiv.org/abs/1902.04094
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import math
from malaya.text.bpe import merge_sentencepiece_tokens, merge_wordpiece_tokens
CLS = '[CLS]'
SEP = '[SEP]'
MASK = '[MASK]'
def topk_distributions(logits, top_k):
with tf.InteractiveSession().as_default():
logits = tf.convert_to_tensor(logits)
kth_vals, kth_idx = tf.nn.top_k(logits, k = top_k)
dist = tfp.distributions.categorical.Categorical(logits = kth_vals)
idx = tf.gather(
kth_idx, tf.expand_dims(dist.sample(), -1), batch_dims = 1
)
idx = tf.squeeze(idx, axis = -1)
return idx.eval()
def distributions(logits):
with tf.InteractiveSession().as_default():
logits = tf.convert_to_tensor(logits)
dist = tfp.distributions.categorical.Categorical(logits = logits)
return dist.sample().eval()
def generate_step(
logits,
gen_idx,
top_k = 0,
temperature = 1.0,
sample = False,
return_list = True,
):
logits = logits[:, gen_idx]
logits = logits / temperature
if top_k > 0:
idx = topk_distributions(logits, top_k)
elif sample:
idx = distributions(logits)
else:
idx = np.argmax(logits, axis = -1)
return idx.tolist() if return_list else idx
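# Illustrative example (comment-only sketch; with top_k=0 and sample=False
# only the numpy argmax path runs, so no TensorFlow session is needed):
#   logits = np.array([[[0.1, 2.0, 0.3], [1.5, 0.2, 0.1]]])  # (batch, seq, vocab)
#   generate_step(logits, gen_idx=0)  # -> [1]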
def tokenize_batch(batch, tokenizer):
return [tokenizer.convert_tokens_to_ids(sent) for sent in batch]
def untokenize_batch(batch, tokenizer):
return [tokenizer.convert_ids_to_tokens(sent) for sent in batch]
def get_init_text(seed_text, max_len, tokenizer, batch_size = 1):
batch = [seed_text + [MASK] * max_len + [SEP] for _ in range(batch_size)]
return tokenize_batch(batch, tokenizer)
def sequential_generation(
seed_text,
model,
batch_size = 5,
max_len = 15,
leed_out_len = 1,
temperature = 1.0,
top_k = 100,
burnin = 20,
):
mask_id = model._tokenizer.vocab['[MASK]']
sep_id = model._tokenizer.vocab['[SEP]']
seed_text = model._tokenizer.tokenize(seed_text)
seed_len = len(seed_text)
batch = get_init_text(
seed_text, max_len, model._tokenizer, batch_size = batch_size
)
for ii in range(max_len):
inp = [sent[: seed_len + ii] + [sep_id] for sent in batch]
batch = np.array(batch)
masks = np.ones(batch.shape)
segments = np.zeros(batch.shape)
out = model._sess.run(
model._logits,
feed_dict = {
model.X: batch,
model.MASK: masks,
model.segment_ids: segments,
},
)
topk = top_k if (ii >= burnin) else 0
idxs = generate_step(
out,
gen_idx = seed_len + ii,
top_k = topk,
temperature = temperature,
sample = (ii < burnin),
)
for jj in range(batch_size):
batch[jj][seed_len + ii] = idxs[jj]
results = untokenize_batch(batch.tolist(), model._tokenizer)
if hasattr(model._tokenizer, 'sp_model'):
merge_function = merge_sentencepiece_tokens
else:
merge_function = merge_wordpiece_tokens
outputs = []
for r in results:
r = [(t, 0) for t in r]
r = merge_function(r)
r = [t[0] for t in r]
outputs.append(' '.join(r))
return outputs
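# Illustrative usage (comment-only sketch; `model` is assumed to be a loaded
# Malaya BERT-style wrapper exposing the _tokenizer, _sess, _logits, X, MASK
# and segment_ids attributes used above):
#   generated = sequential_generation('nama saya', model, batch_size=2, max_len=10)
#   # -> list of 2 generated strings continuing the seed text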
4c045e92df54148ce6ef4110afe95ac625400e40 | 652 bytes | Python | coding patterns/two pointers/sortedarr_square.py | mkoryor/Python @ 837ec4c03130dc4cb919fb5f1eeb4d31206790e4 | ["Unlicense"] | stars: null, issues: null, forks: null
"""
[E] Given a sorted array, create a new array containing squares of all the
number of the input array in the sorted order.
Input: [-2, -1, 0, 2, 3]
Output: [0, 1, 4, 4, 9]
"""
# Time: O(n), Space: O(n)
def make_squares(arr):
n = len(arr)
squares = [0 for x in range(n)]
highestSquareIdx = n - 1
left, right = 0, n - 1
while left <= right:
leftSquare = arr[left] * arr[left]
rightSquare = arr[right] * arr[right]
if leftSquare > rightSquare:
squares[highestSquareIdx] = leftSquare
left += 1
else:
squares[highestSquareIdx] = rightSquare
right -= 1
highestSquareIdx -= 1
return squares
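# Example (comment-only):
#   make_squares([-2, -1, 0, 2, 3])  # -> [0, 1, 4, 4, 9]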
4c060ddce60243c22acf2298bfd181a17b757f40 | 1,917 bytes | Python | modules/evaluate/evaluate_step.py | Azure/aml-object-classification-pipeline @ f94e4327ebfb5534b52c5c70e82832a86c64a2d1 | ["MIT"] | stars: 5, issues: null, forks: 5
import os
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import EstimatorStep
from azureml.train.dnn import PyTorch
def evaluate_step(model_dir, test_dir, compute_target):
'''
This step evaluates the trained model on the testing data and outputs the accuracy.
:param model_dir: The reference to the directory containing the trained model
:type model_dir: DataReference
:param test_dir: The reference to the directory containing the testing data
:type test_dir: DataReference
:param compute_target: The compute target to run the step on
:type compute_target: ComputeTarget
:return: The evaluate step, step outputs dictionary (keys: accuracy_file)
:rtype: EstimatorStep, dict
'''
accuracy_file = PipelineData(
name='accuracy_file',
pipeline_output_name='accuracy_file',
datastore=test_dir.datastore,
output_mode='mount',
is_directory=False)
outputs = [accuracy_file]
outputs_map = { 'accuracy_file': accuracy_file }
estimator = PyTorch(
source_directory=os.path.dirname(os.path.abspath(__file__)),
entry_script='evaluate.py',
framework_version='1.3',
compute_target=compute_target,
use_gpu=True)
step = EstimatorStep(
name="Evaluate Model",
estimator=estimator,
estimator_entry_script_arguments=[
'--test_dir', test_dir,
'--model_dir', model_dir,
'--accuracy_file', accuracy_file
],
inputs=[model_dir, test_dir],
outputs=outputs,
compute_target=compute_target,
allow_reuse=True)
return step, outputs_map
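# Illustrative wiring (comment-only sketch; `workspace`, `model_dir`, `test_dir`
# and `compute_target` are assumed to come from the surrounding pipeline
# definition in this repo):
#   from azureml.pipeline.core import Pipeline
#   step, outputs_map = evaluate_step(model_dir, test_dir, compute_target)
#   pipeline = Pipeline(workspace=workspace, steps=[step])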
4c0955be4956893e543176a5a678b0a7caa5514d | 4,806 bytes | Python | configs/mobilenet_cfbi.py | yoxu515/CFBI @ 0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 (stars: 312); geekJZY/CFBI @ 90a0cd6a3e7961f47f266c7620e8dc281dc43ac8 (issues: 55, forks: 41) | ["BSD-3-Clause"]
import torch
import argparse
import os
import sys
import cv2
import time
class Configuration():
def __init__(self):
self.EXP_NAME = 'mobilenetv2_cfbi'
self.DIR_ROOT = './'
self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets')
self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS')
self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train')
self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid')
self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME)
self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt')
self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log')
self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img')
self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard')
self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval')
self.DATASETS = ['youtubevos']
self.DATA_WORKERS = 4
self.DATA_RANDOMCROP = (465, 465)
self.DATA_RANDOMFLIP = 0.5
self.DATA_MAX_CROP_STEPS = 5
self.DATA_MIN_SCALE_FACTOR = 1.
self.DATA_MAX_SCALE_FACTOR = 1.3
self.DATA_SHORT_EDGE_LEN = 480
self.DATA_RANDOM_REVERSE_SEQ = True
self.DATA_DAVIS_REPEAT = 30
self.DATA_CURR_SEQ_LEN = 3
self.DATA_RANDOM_GAP_DAVIS = 3
self.DATA_RANDOM_GAP_YTB = 3
self.PRETRAIN = True
self.PRETRAIN_FULL = False
self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar'
self.MODEL_BACKBONE = 'mobilenet'
self.MODEL_MODULE = 'networks.cfbi.cfbi'
self.MODEL_OUTPUT_STRIDE = 16
self.MODEL_ASPP_OUTDIM = 256
self.MODEL_SHORTCUT_DIM = 48
self.MODEL_SEMANTIC_EMBEDDING_DIM = 100
self.MODEL_HEAD_EMBEDDING_DIM = 256
self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64
self.MODEL_GN_GROUPS = 32
self.MODEL_GN_EMB_GROUPS = 25
self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12]
self.MODEL_LOCAL_DOWNSAMPLE = True
self.MODEL_REFINE_CHANNELS = 64 # n * 32
self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24
self.MODEL_RELATED_CHANNELS = 64
self.MODEL_EPSILON = 1e-5
self.MODEL_MATCHING_BACKGROUND = True
self.MODEL_GCT_BETA_WD = True
self.MODEL_FLOAT16_MATCHING = True
self.MODEL_FREEZE_BN = True
self.MODEL_FREEZE_BACKBONE = False
self.TRAIN_TOTAL_STEPS = 100000
self.TRAIN_START_STEP = 0
self.TRAIN_LR = 0.01
self.TRAIN_MOMENTUM = 0.9
self.TRAIN_COSINE_DECAY = False
self.TRAIN_WARM_UP_STEPS = 1000
self.TRAIN_WEIGHT_DECAY = 15e-5
self.TRAIN_POWER = 0.9
self.TRAIN_GPUS = 4
self.TRAIN_BATCH_SIZE = 8
self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_TBLOG = False
self.TRAIN_TBLOG_STEP = 60
self.TRAIN_LOG_STEP = 20
self.TRAIN_IMG_LOG = False
self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15
self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_CLIP_GRAD_NORM = 5.
self.TRAIN_SAVE_STEP = 1000
self.TRAIN_MAX_KEEP_CKPT = 8
self.TRAIN_RESUME = False
self.TRAIN_RESUME_CKPT = None
self.TRAIN_RESUME_STEP = 0
self.TRAIN_AUTO_RESUME = True
self.TRAIN_GLOBAL_ATROUS_RATE = 1
self.TRAIN_LOCAL_ATROUS_RATE = 1
self.TRAIN_GLOBAL_CHUNKS = 20
self.TRAIN_DATASET_FULL_RESOLUTION = True
self.TEST_GPU_ID = 0
self.TEST_DATASET = 'youtubevos'
self.TEST_DATASET_FULL_RESOLUTION = False
self.TEST_DATASET_SPLIT = ['val']
self.TEST_CKPT_PATH = None
self.TEST_CKPT_STEP = None # if "None", evaluate the latest checkpoint.
self.TEST_FLIP = False
self.TEST_MULTISCALE = [1]
self.TEST_MIN_SIZE = None
self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800
self.TEST_WORKERS = 4
self.TEST_GLOBAL_CHUNKS = 4
self.TEST_GLOBAL_ATROUS_RATE = 2
self.TEST_LOCAL_ATROUS_RATE = 1
# dist
self.DIST_ENABLE = True
self.DIST_BACKEND = "gloo"
self.DIST_URL = "file://./sharefile"
self.DIST_START_GPU = 0
self.__check()
def __check(self):
if not torch.cuda.is_available():
raise ValueError('config.py: cuda is not available')
if self.TRAIN_GPUS == 0:
raise ValueError('config.py: the number of GPU is 0')
for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]:
if not os.path.isdir(path):
os.makedirs(path)
cfg = Configuration()
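# Illustrative usage (comment-only sketch; assumes the repo root is on
# PYTHONPATH, as when launching the CFBI training scripts):
#   from configs.mobilenet_cfbi import cfg
#   print(cfg.EXP_NAME, cfg.TRAIN_BATCH_SIZE)  # -> mobilenetv2_cfbi 8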
4c09c2107e354abe29a0559333bd163e132e44d0 | 4,551 bytes | Python | tensorflow/python/util/tf_should_use_test.py | npow/tensorflow @ 99ae68bba52bb6338af06f37bb104128d7af6fb4 | ["Apache-2.0"] | stars: null, issues: null, forks: null
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_should_use."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_should_use
@contextlib.contextmanager
def reroute_error():
"""Temporarily reroute errors written to tf_logging.error into `captured`."""
with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error:
with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal:
yield error, fatal
class TfShouldUseTest(test.TestCase):
def testAddShouldUseWarningWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c)
del h
with reroute_error() as (error, _):
in_this_function()
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def testAddShouldUseFatalWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c, fatal_error=True)
del h
with reroute_error() as (_, fatal):
in_this_function()
fatal.assert_called()
msg = '\n'.join(fatal.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def _testAddShouldUseWarningWhenUsed(self, fn, name):
c = constant_op.constant(0, name=name)
with reroute_error() as (error, fatal):
h = tf_should_use._add_should_use_warning(c)
fn(h)
del h
error.assert_not_called()
fatal.assert_not_called()
def testAddShouldUseWarningWhenUsedWithAdd(self):
def add(h):
_ = h + 1
self._testAddShouldUseWarningWhenUsed(add, name='blah_add')
gc.collect()
self.assertFalse(gc.garbage)
def testAddShouldUseWarningWhenUsedWithGetName(self):
def get_name(h):
_ = h.name
self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name')
gc.collect()
self.assertFalse(gc.garbage)
def testShouldUseResult(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah2')
with reroute_error() as (error, _):
return_const(0.0)
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah2:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
def testShouldUseResultWhenNotReallyUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with reroute_error() as (error, _):
with self.test_session():
return_const(0.0)
# Creating another op and executing it does not mark the
# unused op as being "used".
v = constant_op.constant(1.0, name='meh')
v.eval()
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah3:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
# Tests that mark_used is available in the API.
def testMarkUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with self.test_session():
return_const(0.0).mark_used()
if __name__ == '__main__':
test.main()
| 33.463235
| 80
| 0.702703
| 612
| 4,551
| 5.014706
| 0.269608
| 0.046921
| 0.035842
| 0.046921
| 0.478658
| 0.427175
| 0.405018
| 0.38058
| 0.36103
| 0.350603
| 0
| 0.009851
| 0.174687
| 4,551
| 135
| 81
| 33.711111
| 0.807242
| 0.202373
| 0
| 0.525253
| 0
| 0
| 0.067463
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 1
| 0.161616
| false
| 0
| 0.10101
| 0.030303
| 0.30303
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c0a433f8f2a1c5fe05d98092959a53a97b1beea
| 8,767
|
bzl
|
Python
|
tools/jdk/local_java_repository.bzl
|
loongarch64/bazel
|
44c30aceec076a0c25f506508704df0b9aeb6578
|
[
"Apache-2.0"
] | 16,989
|
2015-09-01T19:57:15.000Z
|
2022-03-31T23:54:00.000Z
|
tools/jdk/local_java_repository.bzl
|
loongarch64/bazel
|
44c30aceec076a0c25f506508704df0b9aeb6578
|
[
"Apache-2.0"
] | 12,562
|
2015-09-01T09:06:01.000Z
|
2022-03-31T22:26:20.000Z
|
tools/jdk/local_java_repository.bzl
|
loongarch64/bazel
|
44c30aceec076a0c25f506508704df0b9aeb6578
|
[
"Apache-2.0"
] | 3,707
|
2015-09-02T19:20:01.000Z
|
2022-03-31T17:06:14.000Z
|
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for importing and registering a local JDK."""
load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain")
def _detect_java_version(repository_ctx, java_bin):
properties_out = repository_ctx.execute([java_bin, "-XshowSettings:properties"]).stderr
# This returns an indented list of properties separated with newlines:
# " java.vendor.url.bug = ... \n"
# " java.version = 11.0.8\n"
# " java.version.date = 2020-11-05\"
strip_properties = [property.strip() for property in properties_out.splitlines()]
version_property = [property for property in strip_properties if property.startswith("java.version = ")]
if len(version_property) != 1:
return None
version_value = version_property[0][len("java.version = "):]
parts = version_value.split(".")
major = parts[0]
if len(parts) == 1:
return major
elif major == "1": # handles versions below 1.8
minor = parts[1]
return minor
return major
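# Worked examples for the parsing above (a sketch; the property strings are
# illustrative):
#   "java.version = 11.0.8"    -> parts = ["11", "0", "8"]   -> returns "11"
#   "java.version = 1.8.0_275" -> parts = ["1", "8", "0_275"], major == "1"
#                                 -> returns the minor part "8"
#   "java.version = 15"        -> parts = ["15"]             -> returns "15"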
def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]):
"""Defines a java_runtime target together with Java runtime and compile toolchain definitions.
Java runtime toolchain is constrained by flag --java_runtime_version having
value set to either name or version argument.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation. This requires a different configuration for JDK8
than the newer versions.
Args:
name: name of the target.
java_home: Path to the JDK.
version: Version of the JDK.
runtime_name: name of java_runtime target if it already exists.
visibility: Visibility that will be applied to the java runtime target
"""
if runtime_name == None:
runtime_name = name
native.java_runtime(
name = runtime_name,
java_home = java_home,
visibility = visibility,
)
native.config_setting(
name = name + "_name_setting",
values = {"java_runtime_version": name},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_version_setting",
values = {"java_runtime_version": version},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_name_version_setting",
values = {"java_runtime_version": name + "_" + version},
visibility = ["//visibility:private"],
)
native.alias(
name = name + "_settings_alias",
actual = select({
name + "_name_setting": name + "_name_setting",
name + "_version_setting": name + "_version_setting",
"//conditions:default": name + "_name_version_setting",
}),
visibility = ["//visibility:private"],
)
native.toolchain(
name = "runtime_toolchain_definition",
target_settings = [":%s_settings_alias" % name],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = runtime_name,
)
if version == "8":
default_java_toolchain(
name = name + "_toolchain_java8",
configuration = JVM8_TOOLCHAIN_CONFIGURATION,
source_version = version,
target_version = version,
java_runtime = runtime_name,
)
elif type(version) == type("") and version.isdigit() and int(version) > 8:
for version in range(8, int(version) + 1):
default_java_toolchain(
name = name + "_toolchain_java" + str(version),
source_version = str(version),
target_version = str(version),
java_runtime = runtime_name,
)
# else version is not recognized and no compilation toolchains are predefined
def _local_java_repository_impl(repository_ctx):
"""Repository rule local_java_repository implementation.
Args:
repository_ctx: repository context
"""
java_home = repository_ctx.attr.java_home
java_home_path = repository_ctx.path(java_home)
if not java_home_path.exists:
        fail(('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' +
              "does not exist.") % (java_home, str(java_home_path)))
repository_ctx.file(
"WORKSPACE",
"# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" +
"workspace(name = \"{name}\")\n".format(name = repository_ctx.name),
)
extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else ""
java_bin = java_home_path.get_child("bin").get_child("java" + extension)
if not java_bin.exists:
# Java binary does not exist
repository_ctx.file(
"BUILD.bazel",
_NOJDK_BUILD_TPL.format(
local_jdk = repository_ctx.name,
java_binary = "bin/java" + extension,
java_home = java_home,
),
False,
)
return
# Detect version
version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin)
# Prepare BUILD file using "local_java_runtime" macro
build_file = ""
if repository_ctx.attr.build_file != None:
build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))
runtime_name = '"jdk"' if repository_ctx.attr.build_file else None
local_java_runtime_macro = """
local_java_runtime(
name = "%s",
runtime_name = %s,
java_home = "%s",
version = "%s",
)
""" % (repository_ctx.name, runtime_name, java_home, version)
repository_ctx.file(
"BUILD.bazel",
'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' +
build_file +
local_java_runtime_macro,
)
# Symlink all files
for file in repository_ctx.path(java_home).readdir():
repository_ctx.symlink(file, file.basename)
# Build file template, when JDK does not exist
_NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule")
fail_rule(
name = "jdk",
header = "Auto-Configuration Error:",
message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " +
"PATH or specify Java from remote repository (e.g. " +
"--java_runtime_version=remotejdk_11")
)
config_setting(
name = "localjdk_setting",
values = {{"java_runtime_version": "{local_jdk}"}},
visibility = ["//visibility:private"],
)
toolchain(
name = "runtime_toolchain_definition",
target_settings = [":localjdk_setting"],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = ":jdk",
)
'''
_local_java_repository_rule = repository_rule(
implementation = _local_java_repository_impl,
local = True,
configure = True,
attrs = {
"java_home": attr.string(),
"version": attr.string(),
"build_file": attr.label(),
},
)
def local_java_repository(name, java_home, version = "", build_file = None):
"""Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain.
Toolchain resolution is constrained with --java_runtime_version flag
having value of the "name" or "version" parameter.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation.
    If there is no JDK, "virtual" targets are created, which fail only when actually needed.
Args:
name: A unique name for this rule.
java_home: Location of the JDK imported.
      build_file: optional BUILD file template.
      version: optional Java version.
"""
_local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file)
native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
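# A minimal WORKSPACE usage sketch (the name and java_home below are
# illustrative assumptions, not values from this repo):
#
#   load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_repository")
#
#   local_java_repository(
#       name = "my_local_jdk",
#       java_home = "/usr/lib/jvm/java-11-openjdk",
#       version = "11",  # optional; detected via `java -XshowSettings:properties` when omitted
#   )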
| 37.626609
| 130
| 0.666705
| 1,061
| 8,767
| 5.282752
| 0.240339
| 0.035682
| 0.027119
| 0.011418
| 0.255129
| 0.17413
| 0.138448
| 0.106334
| 0.072792
| 0.072792
| 0
| 0.006083
| 0.231208
| 8,767
| 232
| 131
| 37.788793
| 0.825519
| 0.285046
| 0
| 0.168919
| 0
| 0.006757
| 0.2881
| 0.096088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0
| 0
| 0.060811
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c0ab106ed9ecd5a4593bfc5cb160cb433ae9bfc
| 2,563
|
py
|
Python
|
corehq/apps/fixtures/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/fixtures/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/fixtures/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
from couchdbkit import ResourceNotFound
from tastypie import fields as tp_f
from corehq.apps.api.resources import JsonResource
from corehq.apps.api.resources.v0_1 import (
CustomResourceMeta,
RequirePermissionAuthentication,
)
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.users.models import Permissions
def convert_fdt(fdi):
try:
fdt = FixtureDataType.get(fdi.data_type_id)
fdi.fixture_type = fdt.tag
return fdi
except ResourceNotFound:
return fdi
class FixtureResource(JsonResource):
type = "fixture"
fields = tp_f.DictField(attribute='try_fields_without_attributes',
readonly=True, unique=True)
# when null, that means the ref'd fixture type was not found
fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True,
null=True)
id = tp_f.CharField(attribute='_id', readonly=True, unique=True)
def obj_get(self, bundle, **kwargs):
return convert_fdt(get_object_or_not_exist(
FixtureDataItem, kwargs['pk'], kwargs['domain']))
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
parent_id = bundle.request.GET.get("parent_id", None)
parent_ref_name = bundle.request.GET.get("parent_ref_name", None)
references = bundle.request.GET.get("references", None)
child_type = bundle.request.GET.get("child_type", None)
type_id = bundle.request.GET.get("fixture_type_id", None)
type_tag = bundle.request.GET.get("fixture_type", None)
if parent_id and parent_ref_name and child_type and references:
parent_fdi = FixtureDataItem.get(parent_id)
fdis = list(
FixtureDataItem.by_field_value(
domain, child_type, parent_ref_name,
parent_fdi.fields_without_attributes[references])
)
elif type_id or type_tag:
type_id = type_id or FixtureDataType.by_domain_tag(
domain, type_tag).one()
fdis = list(FixtureDataItem.by_data_type(domain, type_id))
else:
fdis = list(FixtureDataItem.by_domain(domain))
return [convert_fdt(fdi) for fdi in fdis] or []
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_apps)
object_class = FixtureDataItem
resource_name = 'fixture'
limit = 0
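# Hypothetical request sketches against this resource (the URL prefix and
# parameter values are illustrative assumptions; the actual path depends on
# how the API is mounted): filter items by fixture type,
#   GET .../fixture/?fixture_type=city
# or resolve children through a parent reference,
#   GET .../fixture/?parent_id=<id>&parent_ref_name=state_id&child_type=city&references=id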
| 39.430769
| 79
| 0.673039
| 309
| 2,563
| 5.359223
| 0.294498
| 0.025362
| 0.057971
| 0.068841
| 0.123188
| 0.036232
| 0
| 0
| 0
| 0
| 0
| 0.001542
| 0.240734
| 2,563
| 64
| 80
| 40.046875
| 0.849435
| 0.02263
| 0
| 0.037037
| 0
| 0
| 0.057131
| 0.011586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.12963
| 0.018519
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c0c9d4712283b7b6b90ddca4309f49cea6694d9
| 737
|
py
|
Python
|
fastapi_router_controller/lib/controller_loader.py
|
KiraPC/fastapi-router-controller
|
e105701ebce2e03a0e00ac182c10941daf1b7e22
|
[
"MIT"
] | 21
|
2021-03-30T19:39:46.000Z
|
2022-03-30T22:27:39.000Z
|
fastapi_router_controller/lib/controller_loader.py
|
KiraPC/fastapi-router-controller
|
e105701ebce2e03a0e00ac182c10941daf1b7e22
|
[
"MIT"
] | 12
|
2021-03-30T20:52:15.000Z
|
2022-02-23T09:20:42.000Z
|
fastapi_router_controller/lib/controller_loader.py
|
KiraPC/fastapi-router-controller
|
e105701ebce2e03a0e00ac182c10941daf1b7e22
|
[
"MIT"
] | 6
|
2021-04-03T19:17:55.000Z
|
2021-12-20T10:20:57.000Z
|
import os
import importlib
class ControllerLoader:
"""
The ControllerLoader class.
"""
@staticmethod
def load(directory, package):
"""
        A utility that automatically imports all the Python
        modules present in a given directory (recursively).
"""
        for module in os.listdir(directory):
            sub_dir = directory + "/" + module
            if os.path.isdir(sub_dir):
                ControllerLoader.load(sub_dir, "{}.{}".format(package, module))
                continue
            if module == "__init__.py" or not module.endswith(".py"):
                continue
            module_import_name = "{}.{}".format(package, module[:-3])
            importlib.import_module(module_import_name)
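# A minimal usage sketch (directory and package names are assumptions):
#
#   from fastapi_router_controller.lib.controller_loader import ControllerLoader
#
#   ControllerLoader.load("./app/controllers", "app.controllers")
#   # Every .py module under app/controllers (recursively) is imported, so
#   # controllers defined there are registered as an import side effect.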
| 29.48
| 79
| 0.561737
| 76
| 737
| 5.289474
| 0.526316
| 0.044776
| 0.094527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004016
| 0.324288
| 737
| 24
| 80
| 30.708333
| 0.803213
| 0.160109
| 0
| 0
| 0
| 0
| 0.043706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c0e902c9bd14492f727e042bd245ed10c04c202
| 2,739
|
py
|
Python
|
zenslackchat/eventsview.py
|
uktrade/zenslackchat
|
8071757e1ea20a433783c6a7c47f25b046692682
|
[
"MIT"
] | 2
|
2020-12-30T07:46:12.000Z
|
2022-02-01T16:37:34.000Z
|
zenslackchat/eventsview.py
|
uktrade/zenslackchat
|
8071757e1ea20a433783c6a7c47f25b046692682
|
[
"MIT"
] | 7
|
2021-04-14T16:17:29.000Z
|
2022-01-25T11:48:18.000Z
|
zenslackchat/eventsview.py
|
uktrade/zenslackchat
|
8071757e1ea20a433783c6a7c47f25b046692682
|
[
"MIT"
] | 1
|
2021-06-06T09:46:47.000Z
|
2021-06-06T09:46:47.000Z
|
import pprint
import logging
from django.conf import settings
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from zenslackchat.message import handler
from zenslackchat.models import SlackApp
from zenslackchat.models import ZendeskApp
class Events(APIView):
"""Handle Events using the webapp instead of using the RTM API.
    This is handy as I don't need to run a specific bot process just to handle
    events. Instead I can just use the webapp REST API for this.
Handy documentation for Slack events: https://api.slack.com/events-api
The app needs to subscribe to events to receive them. From
https://api.slack.com/apps/<APP ID>/event-subscriptions you need to:
- Enable Events from "Off" to "On"
- Enter the "Request URL" e.g.: http://<instance id>.ngrok.io/slack/events/
- Then "Subscribe to events on behalf of users"
- Click "Add Workspace Event" and add "message.channels".
    Messages on channels will now start being received. The bot will need to be
    invited to a channel first.
"""
def post(self, request, *args, **kwargs):
"""Events will come in over a POST request.
"""
log = logging.getLogger(__name__)
slack_message = request.data
if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN:
log.error("Slack message verification failed!")
return Response(status=status.HTTP_403_FORBIDDEN)
# verification challenge, convert to signature verification instead:
if slack_message.get('type') == 'url_verification':
return Response(data=slack_message, status=status.HTTP_200_OK)
if 'event' in slack_message:
event = slack_message.get('event')
if settings.DEBUG:
log.debug(f'event received:\n{pprint.pformat(event)}\n')
try:
handler(
event,
our_channel=settings.SRE_SUPPORT_CHANNEL,
slack_client=SlackApp.client(),
zendesk_client=ZendeskApp.client(),
workspace_uri=settings.SLACK_WORKSPACE_URI,
zendesk_uri=settings.ZENDESK_TICKET_URI,
user_id=settings.ZENDESK_USER_ID,
group_id=settings.ZENDESK_GROUP_ID,
)
except: # noqa
                # I want all events even if they cause me problems. If I don't
                # accept, the webhook will be marked as broken and then no more
                # events will be sent.
log.exception("Slack message_handler error: ")
return Response(status=status.HTTP_200_OK)
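# A sketch of the POST body this view expects (field values are illustrative;
# see https://api.slack.com/events-api for the actual schema):
#
#   {
#       "token": "<verification token>",
#       "type": "event_callback",
#       "event": {
#           "type": "message",
#           "channel": "C024BE91L",
#           "user": "U2147483697",
#           "text": "Hello world",
#           "ts": "1355517523.000005"
#       }
#   }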
| 38.041667
| 79
| 0.649507
| 349
| 2,739
| 4.982808
| 0.424069
| 0.055204
| 0.029327
| 0.032202
| 0.049454
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004539
| 0.276013
| 2,739
| 71
| 80
| 38.577465
| 0.872416
| 0.359985
| 0
| 0
| 0
| 0
| 0.082938
| 0.021327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.25
| 0
| 0.388889
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c0ea6f1c1da094761872bcebae0cfc6089b3d54
| 16,882
|
py
|
Python
|
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | 2
|
2021-09-16T06:31:45.000Z
|
2022-03-09T19:59:55.000Z
|
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | null | null | null |
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | 2
|
2021-05-11T14:41:01.000Z
|
2021-05-14T05:59:38.000Z
|
# Copyright 2020 University Of Delhi.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compute Related Checks
"""
import configparser
import json
import re
import logging
from tools.kube_utils import kube_exec, get_pod_with_labels
from tools.conf import settings
from internal import store_result
###########
# Checks
###########
def isolated_cores_check():
"""
isolated_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_isolated_cores()
required_value = required_isolated_cores()
result = {'category': 'compute',
'case_name': 'isolated_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def reserved_vnf_cores_check():
"""
reserved_vnf_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_reserved_vnf_cores()
required_value = required_reserved_vnf_cores()
result = {'category': 'compute',
'case_name': 'reserved_vnf_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def vswitch_pmd_cores_check():
"""
vswitch_pmd_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_vswitch_pmd_cores()
required_value = required_vswitch_pmd_cores()
result = {'category': 'compute',
'case_name': 'vswitch_pmd_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def vswitch_dpdk_lcores_check():
"""
vswitch_dpdk_lcores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_vswitch_dpdk_lcores()
required_value = required_vswitch_dpdk_lcores()
result = {'category': 'compute',
'case_name': 'vswitch_dpdk_lcores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def os_reserved_cores_check():
"""
os_reserved_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_os_reserved_cores()
required_value = required_os_reserved_cores()
result = {'category': 'compute',
'case_name': 'os_reserved_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def nova_scheduler_filters_check():
"""
nova_scheduler_filters_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_nova_scheduler_filters()
required_value = required_nova_scheduler_filters()
result = {'category': 'compute',
'case_name': 'nova_scheduler_filters_check',
'details': {'traced_filters': traced_value,
'required_filters': required_value
}
}
if are_lists_equal(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def cpu_allocation_ratio_check():
"""
cpu_allocation_ratio_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_cpu_allocation_ratio()
required_value = required_cpu_allocation_ratio()
result = {'category': 'compute',
'case_name': 'cpu_allocation_ratio_check',
'details': {'traced_ratio': traced_value,
'required_ratio': required_value
}
}
if traced_value == required_value:
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
###############
# helper functions
###############
def trace_isolated_cores():
"""
Trace isolated_cores from Airship deployment
:return: value traced from `isolcpus` key in `/proc/cmdline`
"""
pod = get_pod_with_labels('application=nova,component=compute')
cmd = ['cat', '/proc/cmdline']
proc_cmd = kube_exec(pod, cmd)
    isolcpus_value = ''  # default when the kernel cmdline has no isolcpus option
    for option in proc_cmd.split():
        if 'isolcpus' in option:
            _, isolcpus_value = split_key_value(option)
            break
    return isolcpus_value
def required_isolated_cores():
"""
Returns value of `isolated_cpus` from platform_profile used by
Role for worker nodes in PDF
:return: isolated_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['isolated_cpus']
def trace_reserved_vnf_cores():
"""
Trace vnf_reserved_cores from Airship deployment
:return: value traced from `vcpu_pin_set` key in nova.conf
of actual deployment
"""
try:
config = get_nova_conf()
vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set')
except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
vcpu_pin_set = ''
return vcpu_pin_set
def required_reserved_vnf_cores():
"""
Returns value of vnf_cores from platform_profile used by
Role for worker nodes in PDF
    :return: vnf_reserved_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['vnf_cores']
def trace_vswitch_pmd_cores():
"""
Trace vswitch_pmd_cores from Airship deployment
:return: value traced from `other_config:pmd-cpu-mask` in
openvswitchdb using ovs-vsctl
"""
ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
response = kube_exec(ovs_pod, cmd)
# convert config str to json str
match = re.findall("[a-zA-Z0-9-]+=", response)
for key in match:
response = response.replace(key, '"' + key[:-1] + '":')
match = re.findall(":[a-zA-Z0-9-]+", response)
for key in match:
response = response.replace(key[1:], '"' + key[1:] + '"')
config = json.loads(response)
if 'pmd-cpu-mask' in config:
pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])
else:
pmd_cores = ''
return pmd_cores
def required_vswitch_pmd_cores():
"""
Returns value of vswitch_pmd_cores from platform_profile used by
Role for worker nodes in PDF
:return: vswitch_pmd_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['vswitch_pmd_cores']
def trace_vswitch_dpdk_lcores():
"""
Trace vswitch_dpdk_lcores from Airship deployment
:return: value traced from `other_config:dpdk-lcore-mask` in
openvswitchdb using ovs-vsctl
"""
ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
response = kube_exec(ovs_pod, cmd)
# convert config str to json str
match = re.findall("[a-zA-Z0-9-]+=", response)
for key in match:
response = response.replace(key, '"' + key[:-1] + '":')
match = re.findall(":[a-zA-Z0-9-]+", response)
for key in match:
response = response.replace(key[1:], '"' + key[1:] + '"')
config = json.loads(response)
if 'dpdk-lcore-mask' in config:
pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
else:
pmd_cores = ''
return pmd_cores
def required_vswitch_dpdk_lcores():
"""
Returns value of vswitch_dpdk_lcores from platform_profile used by
Role for worker nodes in PDF
:return: vswitch_dpdk_lcores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['vswitch_dpdk_lcores']
def trace_os_reserved_cores():
"""
Trace os_reserved_cores from Airship deployment
os_reserved_cores = all_cores - (reserved_vnf_cores +
vswitch_pmd_cores +
vswitch_dpdk_lcores)
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
all_cores = get_cores_by_role(worker_role)
reserved_vnf_cores = trace_reserved_vnf_cores()
vswitch_pmd_cores = trace_vswitch_pmd_cores()
vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores()
non_os_cores = []
non_os_cores.extend(convert_range_to_list(reserved_vnf_cores))
non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores))
non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores))
os_reserved_cores = set(all_cores).difference(set(non_os_cores))
# return as string with comma separated value
return ','.join(map(str, list(os_reserved_cores)))
def required_os_reserved_cores():
"""
Returns value of os_reserved_cores from platform_profile used by
Role for worker nodes in PDF
:return: os_reserved_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['os_reserved_cores']
def trace_nova_scheduler_filters():
"""
Trace scheduler_filters from Airship deployment
:return: value traced from `enabled_filters` key in nova.conf
of actual deployment
"""
try:
config = get_nova_conf()
filters = config.get('filter_scheduler', 'enabled_filters')
except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
filters = ''
    filters = [f.strip() for f in filters.split(',')]
return filters
def required_nova_scheduler_filters():
"""
Required nova scheduler_filters by the PDF
"""
pdf = settings.getValue('pdf_file')
filters = pdf['vim_functional']['scheduler_filters']
    filters = [f.strip() for f in filters.split(',')]
return filters
def trace_cpu_allocation_ratio():
"""
Trace cpu_allocation_ratio from Airship deployment
:return: value traced from `cpu_allocation_ratio` key in nova.conf
of actual deployment
"""
try:
config = get_nova_conf()
cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        cpu_allocation_ratio = ''
    if not cpu_allocation_ratio:
        return None  # avoid float('') raising when nova.conf lacks the option
    return float(cpu_allocation_ratio)
def required_cpu_allocation_ratio():
"""
Required cpu_allocation_ratio by the PDF
"""
pdf = settings.getValue('pdf_file')
cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio']
return float(cpu_allocation_ratio)
def get_role(role_name):
"""
Searches and returns role with `role_name`
"""
roles = settings.getValue('pdf_file')['roles']
for role in roles:
if role['name'] == role_name:
role_details = role
return role_details
def get_platform_profile(profile_name):
"""
Searches and returns platform_profile with `profile_name`
"""
platform_profiles = settings.getValue('pdf_file')['platform_profiles']
for profile in platform_profiles:
if profile['profile_name'] == profile_name:
profile_details = profile
return profile_details
def get_processor_profile(profile_name):
"""
Searches and returns processor_profile with `profile_name`
"""
processor_profiles = settings.getValue('pdf_file')['processor_profiles']
for profile in processor_profiles:
if profile['profile_name'] == profile_name:
profile_details = profile
return profile_details
def get_platform_profile_by_role(role_name):
"""
Returns platform profile details of a role
"""
role = get_role(role_name)
profile = get_platform_profile(role['platform_profile'])
return profile
def get_hardware_profile_by_role(role_name):
"""
Returns hardware profile details of a role
"""
role = get_role(role_name)
hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']
for profile in hardware_profiles:
if profile['profile_name'] == role['hardware_profile']:
profile_details = profile
return profile_details
def get_cores_by_role(role_name):
"""
Returns cpu cores list of server hardware used in the role
"""
hardware_profile = get_hardware_profile_by_role(role_name)
processor_profile = hardware_profile['profile_info']['processor_profile']
profile = get_processor_profile(processor_profile)
cpus = []
for numa in profile['profile_info']['numas']:
cpus.extend(convert_range_to_list(numa['cpu_set']))
return cpus
def get_nova_conf():
"""
Returns parsed nova.conf
"""
pod = get_pod_with_labels('application=nova,component=compute')
cmd = ['cat', '/etc/nova/nova.conf']
response = kube_exec(pod, cmd)
config = configparser.ConfigParser()
config.read_string(response)
return config
### cpu cores related helper function
def convert_range_to_list(x):
"""
Returns list of numbers from given range as string
e.g.: convert_range_to_list('3-5') will give [3, 4, 5]
"""
# pylint: disable=C0103
result = []
for part in x.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
elif part != '':
a = int(part)
result.append(a)
# remove duplicates
result = list(dict.fromkeys(result))
return result
def is_ranges_equals(range1, range2):
"""
Checks whether two ranges passed as string are equal
e.g.: is_ranges_equals('2-5', '2-4,5') returns true
"""
set1 = set(convert_range_to_list(range1))
set2 = set(convert_range_to_list(range2))
return set1 == set2
def are_lists_equal(list1, list2):
"""
    Checks whether two lists are identical
"""
set1 = set(list1)
set2 = set(list2)
return set1 == set2
def hex_to_comma_list(hex_mask):
"""
Converts CPU mask given in hex to list of cores
"""
binary = bin(int(hex_mask, 16))[2:]
reversed_binary = binary[::-1]
i = 0
output = ""
for bit in reversed_binary:
if bit == '1':
output = output + str(i) + ','
i = i + 1
return output[:-1]
def comma_list_to_hex(cpus):
"""
Converts a list of cpu cores in corresponding hex value
of cpu-mask
"""
cpu_arr = cpus.split(",")
binary_mask = 0
for cpu in cpu_arr:
binary_mask = binary_mask | (1 << int(cpu))
return format(binary_mask, '02x')
def split_key_value(key_value_str, delimiter='='):
"""
splits given string into key and value based on delimiter
:param key_value_str: example string `someKey=somevalue`
:param delimiter: default delimiter is `=`
:return: [ key, value]
"""
key, value = key_value_str.split(delimiter)
key = key.strip()
value = value.strip()
return key, value
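# Worked examples for the helpers above (a sketch, derived from the
# definitions in this module):
#
#   convert_range_to_list('3-5,7')   -> [3, 4, 5, 7]
#   is_ranges_equals('2-5', '2-4,5') -> True
#   hex_to_comma_list('0x6')         -> '1,2'  (bits 1 and 2 are set)
#   comma_list_to_hex('1,2')         -> '06'
#   split_key_value('isolcpus=2-5')  -> ('isolcpus', '2-5')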
| 25.197015
| 91
| 0.65093
| 2,028
| 16,882
| 5.116371
| 0.130178
| 0.022263
| 0.029491
| 0.013878
| 0.555127
| 0.499325
| 0.463184
| 0.431284
| 0.4016
| 0.352062
| 0
| 0.005245
| 0.243395
| 16,882
| 669
| 92
| 25.234679
| 0.807093
| 0.216384
| 0
| 0.435484
| 0
| 0
| 0.137253
| 0.025965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109677
| false
| 0.022581
| 0.022581
| 0
| 0.241935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1112a8d3df95d531441fb2f11172b25c1ca8ae
| 1,945
|
py
|
Python
|
src/tests/client_side/test_main.py
|
JulianSobott/OpenDrive
|
0593c994c3bccccc4351557c42d13f3535b6b6c1
|
[
"Apache-2.0"
] | 1
|
2021-03-18T16:20:46.000Z
|
2021-03-18T16:20:46.000Z
|
src/tests/client_side/test_main.py
|
JulianSobott/OpenDrive
|
0593c994c3bccccc4351557c42d13f3535b6b6c1
|
[
"Apache-2.0"
] | 2
|
2019-06-04T21:50:23.000Z
|
2019-06-14T13:20:50.000Z
|
src/tests/client_side/test_main.py
|
JulianSobott/OpenDrive
|
0593c994c3bccccc4351557c42d13f3535b6b6c1
|
[
"Apache-2.0"
] | null | null | null |
import os
import threading
import time
import unittest
from OpenDrive.client_side import file_changes_json as c_json
from OpenDrive.client_side import interface
from OpenDrive.client_side import main
from OpenDrive.client_side import paths as client_paths
from OpenDrive.server_side import paths as server_paths
from tests.client_side.helper_client import h_register_dummy_user_device_client
from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \
h_clear_init_all_folders, h_create_empty
class TestMain(unittest.TestCase):
def setUp(self) -> None:
h_clear_init_all_folders()
self._server_process = h_start_server_process()
self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, "folder1")
h_create_empty(self.folder1_abs_local_path)
main.MIN_UPDATE_PAUSE_TIME = 1
def tearDown(self) -> None:
main.shutdown()
h_stop_server_process(self._server_process)
@h_client_routine(clear_folders=False)
def putest_start_logged_in(self):
user = h_register_dummy_user_device_client()
main_thread = threading.Thread(target=main.start, daemon=True)
main_thread.start()
time.sleep(2) # wait till changes.json is created
interface.add_sync_folder(self.folder1_abs_local_path, "folder1")
expected_content = c_json.get_all_data()
file_path = os.path.join(self.folder1_abs_local_path, "dummy.txt")
with open(file_path, "w") as f:
f.write("Hello World")
time.sleep(5) # wait till synchronization finished
expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), "folder1/dummy.txt")
self.assertTrue(os.path.exists(expected_path), "dummy file is not pulled to server!")
self.assertEqual(expected_content, c_json.get_all_data())
time.sleep(1) # wait till waiting...
| 42.282609
| 108
| 0.748586
| 281
| 1,945
| 4.825623
| 0.341637
| 0.057522
| 0.056047
| 0.067847
| 0.271386
| 0.088496
| 0.044248
| 0
| 0
| 0
| 0
| 0.006832
| 0.172237
| 1,945
| 45
| 109
| 43.222222
| 0.835404
| 0.045758
| 0
| 0
| 0
| 0
| 0.046976
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.078947
| false
| 0
| 0.289474
| 0
| 0.394737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1540d22f910c13d547019d54ee005a23d41b8e
| 559
|
py
|
Python
|
country/management/commands/populate_countries.py
|
okchaty/django-country
|
740bc25956dc1b87f44486538a62037e0bd0ac94
|
[
"MIT"
] | 1
|
2020-04-02T16:50:38.000Z
|
2020-04-02T16:50:38.000Z
|
country/management/commands/populate_countries.py
|
okchaty/django-country
|
740bc25956dc1b87f44486538a62037e0bd0ac94
|
[
"MIT"
] | 4
|
2020-03-30T15:39:55.000Z
|
2020-04-10T15:04:28.000Z
|
country/management/commands/populate_countries.py
|
okchaty/django-country
|
740bc25956dc1b87f44486538a62037e0bd0ac94
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from os import path
class Command(BaseCommand):
help = "Populates data"
def handle(self, *args, **options):
fixture_path = path.join(path.dirname(
path.dirname(
path.dirname(
path.abspath(__file__)
)
)
), "fixtures/")
settings.FIXTURE_DIRS = (fixture_path,)
call_command("loaddata", "country", verbosity=1)
| 27.95
| 56
| 0.615385
| 59
| 559
| 5.677966
| 0.559322
| 0.089552
| 0.134328
| 0.143284
| 0.110448
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002513
| 0.288014
| 559
| 19
| 57
| 29.421053
| 0.839196
| 0
| 0
| 0.125
| 0
| 0
| 0.067979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1606fa8a8ca96d6fb7ac7c7412b894e0bb3a10
| 417
|
py
|
Python
|
formatter.py
|
Staist/Python-Text-Formatter
|
6ae865d45301906eaa133551301dc785602f5b38
|
[
"MIT"
] | null | null | null |
formatter.py
|
Staist/Python-Text-Formatter
|
6ae865d45301906eaa133551301dc785602f5b38
|
[
"MIT"
] | null | null | null |
formatter.py
|
Staist/Python-Text-Formatter
|
6ae865d45301906eaa133551301dc785602f5b38
|
[
"MIT"
] | null | null | null |
dosyaadi = input("Enter file name: ")
dosyaadi = str(dosyaadi + ".txt")
with open(dosyaadi, 'r') as file :
dosyaicerigi = file.read()
silinecek = str(input("Enter the text that you wish to delete: "))
dosyaicerigi = dosyaicerigi.replace(silinecek, '')
with open(dosyaadi, 'w') as file:
    file.write(dosyaicerigi)
print("-" * 30)
print("Successfully deleted!")
print("-" * 30)
| 26.0625
| 67
| 0.647482
| 51
| 417
| 5.294118
| 0.568627
| 0.074074
| 0.118519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011869
| 0.191847
| 417
| 15
| 68
| 27.8
| 0.789318
| 0
| 0
| 0.166667
| 0
| 0
| 0.21393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c168858057ebcae4ef4e91a7860a8034fcefa15
| 6,106
|
py
|
Python
|
covid19/classification/helpers.py
|
salvacarrion/mltests
|
e4ac9711c1c80171f302edc88011fbe06e754490
|
[
"MIT"
] | null | null | null |
covid19/classification/helpers.py
|
salvacarrion/mltests
|
e4ac9711c1c80171f302edc88011fbe06e754490
|
[
"MIT"
] | 1
|
2022-01-01T06:09:26.000Z
|
2022-01-01T06:09:26.000Z
|
covid19/classification/helpers.py
|
salvacarrion/mltests
|
e4ac9711c1c80171f302edc88011fbe06e754490
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
@tf.function
def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Covid19(y_true, y_pred, i=2):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Normal(y_true, y_pred, i=3):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    def __init__(self, *args, **kwargs):
        # Pop the custom kwarg before ModelCheckpoint sees it (it rejects
        # unknown keyword arguments), mirroring CustomEarlyStopping below.
        self.wait_epoch_warmup = kwargs.pop("wait_epoch_warmup", None)
        super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if self.wait_epoch_warmup:
if (epoch + 1) >= self.wait_epoch_warmup:
super().on_epoch_end(epoch, logs)
else:
self.epochs_since_last_save += 1
print(f"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})")
else:
super().on_epoch_end(epoch, logs)
class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
def __init__(self, *args, **kwargs):
self.minimum_epochs = kwargs.get("minimum_epochs", 0)
kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs
super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if epoch >= self.minimum_epochs:
super().on_epoch_end(epoch, logs)
def get_losses():
losses = [tf.keras.losses.BinaryCrossentropy()]
return losses
def get_metrics(single_output_idx, add_normal=False):
metrics = []
if single_output_idx is None: # Multi-label
print("###### Multi-label classification ######")
metrics += [
BinaryAccuracy_Infiltrates,
BinaryAccuracy_Pneumonia,
BinaryAccuracy_Covid19
]
# Add normal class
if add_normal:
metrics.append(BinaryAccuracy_Normal)
else:
print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
metrics = [
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()
]
return metrics
def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
istrainable = not freeze_base_model
# Select backbone
if backbone == "resnet50":
from tensorflow.keras.applications.resnet import ResNet50 as TFModel
from tensorflow.keras.applications.resnet import preprocess_input
elif backbone == "resnet50v2":
from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "resnet101v2":
from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "vgg16":
from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
from tensorflow.keras.applications.vgg16 import preprocess_input
elif backbone == "efficientnetb0":
from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
elif backbone == "efficientnetb7":
from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
else:
raise ValueError(f"Unknown backbone: {backbone}")
if ignore_model:
model = None
else:
# Instantiate base model with pre-trained weights
base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")
# Freeze base model
# base_model.trainable = istrainable
for layers in base_model.layers:
layers.trainable = istrainable
# Create a new model on top
inputs = base_model.input
x = base_model(inputs)
# Option A
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
# Option B
# x = tf.keras.layers.Flatten(name='flatten')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
# Outputs
outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)
model = tf.keras.Model(inputs, outputs)
return model, preprocess_input
def add_tabular_input(model, classes):
# Input1
input1 = model.input
input2 = tf.keras.layers.Input(shape=(2,), name="input_2b")
# Pre-outputs 1x3 + 1x3
output1 = model.output
output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2)
# Outputs
x = tf.keras.layers.Concatenate(axis=1)([output1, output2])
output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x)
model = tf.keras.Model([input1, input2], output)
return model
def unfreeze_base_model(model, n=None, unfreeze=True):
base_model = model.layers[1].layers
# Select number of layers to unfreeze
idx = 0
if n is not None:
if isinstance(n, int):
idx = n
print(f"Unfreezing {len(base_model) - idx} layers")
elif isinstance(n, float) and 0.0 < n <= 1.0:
idx = int(len(base_model) * n)
print(f"Unfreezing {idx} layers")
else:
raise ValueError("Invalid number of layers")
# We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
for layer in base_model[-idx:]:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
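# A minimal end-to-end sketch (hyperparameters, shapes, and the optimizer
# settings are illustrative assumptions, not values from this repo):
#
#   model, preprocess_input = get_model("resnet50", classes=3,
#                                       target_size=(224, 224),
#                                       freeze_base_model=True)
#   model.compile(optimizer="adam", loss=get_losses(),
#                 metrics=get_metrics(single_output_idx=None))
#   # ... train with the frozen backbone first ...
#   unfreeze_base_model(model, n=0.3)  # unfreeze the last 30% of base layers
#   model.compile(optimizer=tf.keras.optimizers.Adam(1e-5), loss=get_losses(),
#                 metrics=get_metrics(single_output_idx=None))  # recompile after unfreezing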
| 35.5
| 104
| 0.664265
| 748
| 6,106
| 5.259358
| 0.221925
| 0.040925
| 0.057956
| 0.09456
| 0.396289
| 0.346467
| 0.252415
| 0.217844
| 0.182766
| 0.166497
| 0
| 0.016698
| 0.225188
| 6,106
| 171
| 105
| 35.707602
| 0.814838
| 0.092041
| 0
| 0.234783
| 0
| 0
| 0.089247
| 0.011586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113043
| false
| 0
| 0.113043
| 0.034783
| 0.313043
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c16d8f05cc4bb4747f1b27b93145e440fc653d6
| 3,528
|
py
|
Python
|
null/twitter/twmedia-dl.py
|
mikoim/funstuff
|
3c391c76784a4bb37983c1a251773bfa61182ce1
|
[
"MIT"
] | null | null | null |
null/twitter/twmedia-dl.py
|
mikoim/funstuff
|
3c391c76784a4bb37983c1a251773bfa61182ce1
|
[
"MIT"
] | null | null | null |
null/twitter/twmedia-dl.py
|
mikoim/funstuff
|
3c391c76784a4bb37983c1a251773bfa61182ce1
|
[
"MIT"
] | null | null | null |
import re
import json
import time
import sys
import httplib2
from twitter import *
import magic
class TwitterMediaDL:
http = httplib2.Http(".cache")
baseUrl = "https://twitter.com"
consumer_key = ""
consumer_secret = ""
access_token_key = ""
access_token_secret = ""
t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret))
remaining = None
def http_wrapper(self, uri):
resp, content = self.http.request(
uri=uri, method='GET'
)
return content
def get_medias(self, nickname):
ids = []
for tweet in re.findall("twitter.com/(.+)/status/([0-9]+)",
self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()):
ids.append(int(tweet[1]))
max_id = ids[len(ids) - 1]
while 1:
res_raw = self.http_wrapper(
self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % (
nickname, max_id)).decode()
try:
res = json.loads(res_raw)
            except ValueError:  # json.JSONDecodeError is a ValueError subclass
print(res_raw)
time.sleep(5)
res_raw = self.http_wrapper(
self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % (
nickname, max_id)).decode()
res = json.loads(res_raw)
if not res['has_more_items']:
break
for tweet in re.findall("twitter.com/(.+)/status/([0-9]+)", res['items_html']):
ids.append(int(tweet[1]))
max_id = int(res['max_id'])
return list(set(ids))
def get_image_url(self, tweet_id):
lst = []
        if self.remaining is None or self.remaining % 10 == 0 or self.remaining <= 1:
self.check_limit()
r = self.t.statuses.show(_id=tweet_id, _method='GET')
self.remaining -= 1
print('{:d}\t{:d}\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text']))
for m in r['entities']['media']:
lst.append(m['media_url'] + ':orig')
return lst
def check_limit(self):
r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id']
self.remaining = r['remaining']
print("API Limit : {:d} / {:d} = {:f}".format(r['remaining'], r['limit'], r['remaining'] / r['limit']),
file=sys.stderr)
if r['remaining'] / r['limit'] < 0.10:
reset = r['reset'] - time.time()
print("Please wait... {:f}".format(reset), file=sys.stderr)
time.sleep(reset + 10)
@staticmethod
def get_file_extension(binary):
mime = magic.from_buffer(binary, True).decode()
return mime.split('/')[1]
@staticmethod
def get_unix_epoch(created_at):
return int(time.mktime(time.strptime(created_at, "%a %b %d %H:%M:%S +0000 %Y")))
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
tw = TwitterMediaDL()
for tweetID in tw.get_medias(sys.argv[i]):
list_url = tw.get_image_url(tweetID)
for j in range(0, len(list_url)):
raw = tw.http_wrapper(list_url[j])
ext = tw.get_file_extension(raw)
with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f:
f.write(raw)
| 30.947368
| 133
| 0.552721
| 453
| 3,528
| 4.125828
| 0.302428
| 0.018727
| 0.032103
| 0.030498
| 0.264312
| 0.23114
| 0.197967
| 0.173355
| 0.173355
| 0.173355
| 0
| 0.012857
| 0.294501
| 3,528
| 113
| 134
| 31.221239
| 0.738047
| 0
| 0
| 0.146341
| 0
| 0.02439
| 0.161281
| 0.070295
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.085366
| 0.012195
| 0.329268
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c189de34ca4832b1a00970032415cde76a25896
| 9,133
|
py
|
Python
|
girder/models/group.py
|
scottwittenburg/girder
|
a5062badc97bf2a87a385648f2ff3f9ff1990a75
|
[
"Apache-2.0"
] | null | null | null |
girder/models/group.py
|
scottwittenburg/girder
|
a5062badc97bf2a87a385648f2ff3f9ff1990a75
|
[
"Apache-2.0"
] | null | null | null |
girder/models/group.py
|
scottwittenburg/girder
|
a5062badc97bf2a87a385648f2ff3f9ff1990a75
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
from .model_base import AccessControlledModel,\
ValidationException,\
AccessException
from girder.constants import AccessType
class Group(AccessControlledModel):
"""
Groups are simply groups of users. The primary use of grouping users is
to simplify access control for resources in the system, but they can
be used for other purposes that require groupings of users as well.
Group membership is stored in the database on the user document only;
there is no "users" field in this model. This is to optimize for the most
common use case for querying membership, which involves checking access
control policies, which is always done relative to a specific user. The
task of querying all members within a group is much less common and
    typically only performed on a single group at a time, so doing a find on the
indexed group list in the user collection is sufficiently fast.
Users with READ access on the group can see the group and its members.
Users with WRITE access on the group can add and remove members and
change the name or description.
Users with ADMIN access can delete the entire group.
"""
def initialize(self):
self.name = 'group'
self.ensureIndices(['lowerName'])
self.ensureTextIndex({
'name': 10,
'description': 1
})
def validate(self, doc):
doc['name'] = doc['name'].strip()
doc['lowerName'] = doc['name'].lower()
doc['description'] = doc['description'].strip()
if not doc['name']:
raise ValidationException('Group name must not be empty.', 'name')
q = {
'lowerName': doc['lowerName'],
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
duplicates = self.find(q, limit=1, fields=['_id'])
if duplicates.count() != 0:
            raise ValidationException('A group with that name already '
                                      'exists.', 'name')
return doc
def list(self, user=None, limit=50, offset=0, sort=None):
"""
Search for groups or simply list all visible groups.
:param user: The user to search as.
:param limit: Result set size limit.
:param offset: Offset into the results.
:param sort: The sort direction.
"""
# Perform the find; we'll do access-based filtering of the result
# set afterward.
cursor = self.find({}, limit=0, sort=sort)
for r in self.filterResultsByPermission(cursor=cursor, user=user,
level=AccessType.READ,
limit=limit, offset=offset):
yield r
def remove(self, group):
"""
Delete a group, and all references to it in the database.
:param group: The group document to delete.
:type group: dict
"""
# Remove references to this group from user group membership lists
self.model('user').update({
'groups': group['_id']
}, {
'$pull': {'groups': group['_id']}
})
acQuery = {
'access.groups.id': group['_id']
}
acUpdate = {
'$pull': {
'access.groups': {'id': group['_id']}
}
}
# Remove references to this group from access-controlled collections.
self.update(acQuery, acUpdate)
self.model('collection').update(acQuery, acUpdate)
self.model('folder').update(acQuery, acUpdate)
self.model('user').update(acQuery, acUpdate)
# Finally, delete the document itself
AccessControlledModel.remove(self, group)
def getMembers(self, group, offset=0, limit=50, sort=None):
"""
Return the list of all users who belong to this group.
:param group: The group to list members on.
:param offset: Offset into the result set of users.
:param limit: Result set size limit.
:param sort: Sort parameter for the find query.
:returns: List of user documents.
"""
q = {
'groups': group['_id']
}
cursor = self.model('user').find(
q, offset=offset, limit=limit, sort=sort)
users = []
for user in cursor:
users.append(user)
return users
def addUser(self, group, user, level=AccessType.READ):
"""
Add the user to the group. Records membership in the group in the
user document, and also grants the specified access level on the
group itself to the user. Any group member has at least read access on
the group.
"""
if 'groups' not in user:
user['groups'] = []
if group['_id'] not in user['groups']:
user['groups'].append(group['_id'])
self.model('user').save(user, validate=False)
self.setUserAccess(group, user, level, save=True)
return group
def joinGroup(self, group, user):
"""
Call this when the user accepts an invitation.
"""
if 'groupInvites' not in user:
user['groupInvites'] = []
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
self.addUser(group, user, level=invite['level'])
user['groupInvites'].remove(invite)
self.model('user').save(user, validate=False)
break
else:
raise AccessException('User was not invited to this group.')
return group
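# Note (descriptive sketch): the for/else above relies on Python's loop-else
# semantics; the else branch runs only when the loop completed without hitting
# 'break', i.e. when no matching invitation was found for this group.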
def inviteUser(self, group, user, level=AccessType.READ):
"""
Invite a user to join the group. Inviting them automatically
grants the user read access to the group so that they can see it.
Once they accept the invitation, they will be given the specified level
of access.
"""
# User has to be able to see the group to join it
self.setUserAccess(group, user, AccessType.READ, save=True)
if group['_id'] in user.get('groups', []):
raise ValidationException('User is already in this group.')
if 'groupInvites' not in user:
user['groupInvites'] = []
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
invite['level'] = level
break
else:
user['groupInvites'].append({
'groupId': group['_id'],
'level': level
})
return self.model('user').save(user, validate=False)
def removeUser(self, group, user):
"""
Remove the user from the group.
"""
# Remove group membership for this user.
if 'groups' in user and group['_id'] in user['groups']:
user['groups'].remove(group['_id'])
self.model('user').save(user, validate=False)
# Remove all group access for this user on this group.
self.setUserAccess(group, user, level=None, save=True)
return group
def createGroup(self, name, creator, description='', public=True):
"""
Create a new group. The creator will be given admin access to it.
:param name: The name of the group.
:type name: str
:param description: Description for the group.
:type description: str
:param public: Whether the group is publicly visible.
:type public: bool
:param creator: User document representing the creator of the group.
:type creator: dict
:returns: The group document that was created.
"""
assert type(public) is bool
now = datetime.datetime.now()
group = {
'name': name,
'description': description,
'created': now,
'updated': now
}
self.setPublic(group, public=public)
# Now validate and save the group
self.save(group)
# We make the creator a member of this group and also grant them
# admin access over the group.
self.addUser(group, creator, level=AccessType.ADMIN)
return group
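# Hypothetical usage sketch (not part of this module): creating a group and
# inviting a second user at WRITE level would look roughly like
#   group = groupModel.createGroup('editors', admin, public=False)
#   groupModel.inviteUser(group, other_user, level=AccessType.WRITE)
#   groupModel.joinGroup(group, other_user)  # called when the invite is accepted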
| 34.858779
| 79
| 0.581518
| 1,092
| 9,133
| 4.84707
| 0.260989
| 0.027206
| 0.017193
| 0.012847
| 0.173626
| 0.108823
| 0.085018
| 0.048744
| 0.048744
| 0.033251
| 0
| 0.003321
| 0.307566
| 9,133
| 261
| 80
| 34.992337
| 0.83365
| 0.394394
| 0
| 0.218487
| 0
| 0
| 0.122746
| 0
| 0
| 0
| 0
| 0
| 0.008403
| 1
| 0.084034
| false
| 0
| 0.02521
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1a065c357d38d64659fb6993766afa52a31235
| 9,999
|
py
|
Python
|
main.py
|
acitv/plugin.video.aci
|
c836096c90affd80949e51cd24517709a63eff52
|
[
"MIT"
] | null | null | null |
main.py
|
acitv/plugin.video.aci
|
c836096c90affd80949e51cd24517709a63eff52
|
[
"MIT"
] | null | null | null |
main.py
|
acitv/plugin.video.aci
|
c836096c90affd80949e51cd24517709a63eff52
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import urllib
import urlparse
# import xbmc
import xbmcgui
import xbmcplugin
import aci
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
# Get an instance of ACI.
ATV = aci.ACI()
ATV.load_aci()
# Encode user agent headers for video.
user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 '
'Firefox/47.0 FirePHP/0.7.4',
'X-Requested-With': 'ShockwaveFlash/22.0.0.192'
})
def get_url(**kwargs):
"""
Create a URL for calling the plugin recursively from the given set of keyword arguments.
:param kwargs: "argument=value" pairs
:type kwargs: dict
:return: plugin call URL
:rtype: str
"""
return '{0}?{1}'.format(_url, urllib.urlencode(kwargs))
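# Example (hypothetical values): get_url(action='listing', category='movies')
# would return something like
#   'plugin://plugin.video.aci/?action=listing&category=movies'
# since _url is the plugin:// base URL passed in sys.argv[0].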
def get_categories():
"""
Get the list of video categories.
Here you can insert some parsing code that retrieves
the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:return: The list of video categories
:rtype: types.GeneratorType
"""
return ATV.aci.iterkeys()
def get_videos(category):
"""
Get the list of video files/streams.
Here you can insert some parsing code that retrieves
the list of video streams in the given category from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:param category: Category name
:type category: str
:return: the list of videos in the category
:rtype: list
"""
return ATV.aci[category]
def list_categories():
"""
Create the list of video categories in the Kodi interface.
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, 'ACI')
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get video categories
categories = get_categories()
# Iterate through categories
for category in categories:
# xbmc.log(category.encode("utf-8"), xbmc.LOGNOTICE)
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=category.title())
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': "icon.png",
'icon': "icon.png",
'fanart': "icon.png"})
# Set additional info for the list item.
# Here we use a category name for both properties for simplicity's sake.
# setInfo allows you to set various information for an item.
# For available properties see the following link:
# https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14
# 'mediatype' is needed for a skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': category.title(),
'genre': category.title(),
'mediatype': 'video'})
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=listing&category=[category name]
url = get_url(action="listing", category=category)
# is_folder = True means that this item opens a sub-list of lower level items.
is_folder = True
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
"""
Create the list of playable videos in the Kodi interface.
:param category: Category name
:type category: str
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, category)
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get the list of videos in the category.
videos = get_videos(category)
# Iterate through each video.
for video_id in videos:
# Get the video item to process.
video_item = videos[video_id]
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=video_item["title"])
# Set additional info for the list item.
# 'mediatype' is needed for skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': video_item["title"],
'genre': category.title(),
'mediatype': 'video'})
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': video_item["thumbnail"],
'icon': video_item["thumbnail"],
'fanart': video_item["thumbnail"]
})
# Set 'IsPlayable' property to 'true'.
# This is mandatory for playable items!
list_item.setProperty('IsPlayable', 'true')
referer_header = urllib.urlencode({"Referer": video_item["location"]})
video_item['url'] += '|%s&%s' % (user_agent_headers, referer_header)
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=play&
# video=[video url]
url = get_url(action='play', video=video_item['url'])
# video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \
# '&streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \
# '&|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \
# 'FirePHP/0.7.4&X-Requested-With=ShockwaveFlash/22.0.0.192&Referer=' + \
# urllib.quote_plus(video['reference'])
# url = get_url(action='play', video=video_url)
# Add the list item to a virtual Kodi folder.
# is_folder = False means that this item won't open any sub-list.
is_folder = False
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(path):
"""
Play a video by the provided path.
:param path: Fully-qualified video URL
:type path: str
"""
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
# Play with inputstream addon.
play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')
play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring: URL encoded plugin paramstring
:type paramstring: str
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(urlparse.parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Load the videos for aci.
if params['category'] == "shows":
ATV.update_aci_shows()
print("Updated from main shows.")
elif params['category'] == "cable":
ATV.update_aci_cable()
print("Updated from main cable.")
elif params['category'] == "movies":
ATV.update_aci_movies()
print("Updated from main movies.")
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'])
else:
# If the provided paramstring does not contain a supported action
# we raise an exception. This helps to catch coding errors,
# e.g. typos in action names.
raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
else:
# Load ATV.
ATV.load_aci()
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
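# Sketch of the call flow (assumed invocation): when Kodi launches the plugin as
#   plugin://plugin.video.aci/?action=play&video=<stream url>
# sys.argv[2] holds '?action=play&video=<stream url>', so the slice above hands
# 'action=play&video=<stream url>' to router(), which dispatches to play_video().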
| 38.019011
| 116
| 0.633863
| 1,252
| 9,999
| 4.980032
| 0.241214
| 0.017963
| 0.015878
| 0.015718
| 0.449078
| 0.422935
| 0.411067
| 0.368565
| 0.345469
| 0.345469
| 0
| 0.013019
| 0.270227
| 9,999
| 262
| 117
| 38.164122
| 0.841442
| 0.523352
| 0
| 0.206897
| 0
| 0.011494
| 0.138362
| 0.013167
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08046
| false
| 0
| 0.068966
| 0
| 0.183908
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1ab77adfecf5628021417f2b5bb34c29a975d3
| 17,151
|
py
|
Python
|
coremltools/converters/mil/frontend/tensorflow/converter.py
|
VadimLevin/coremltools
|
66c17b0fa040a0d8088d33590ab5c355478a9e5c
|
[
"BSD-3-Clause"
] | 3
|
2018-10-02T17:23:01.000Z
|
2020-08-15T04:47:07.000Z
|
coremltools/converters/mil/frontend/tensorflow/converter.py
|
holzschu/coremltools
|
5ece9069a1487d5083f00f56afe07832d88e3dfa
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/frontend/tensorflow/converter.py
|
holzschu/coremltools
|
5ece9069a1487d5083f00f56afe07832d88e3dfa
|
[
"BSD-3-Clause"
] | 1
|
2021-05-07T15:38:20.000Z
|
2021-05-07T15:38:20.000Z
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
from coremltools.converters.mil.input_types import (
InputType,
TensorType,
ImageType,
RangeDim,
_get_shaping_class,
)
from coremltools.converters.mil.input_types import Shape as InputShape
from coremltools.converters.mil.mil.var import Var
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.mil.types import is_tensor
from coremltools.converters.mil.mil import types
from .basic_graph_ops import topsort, simple_topsort
from .convert_utils import convert_graph
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Program
from coremltools.converters.mil.mil import Function
from .ssa_passes.tf_passes import tensorflow_passes
from coremltools.converters._profile_utils import _profile
# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
# to the current TF --> tfssa transcription.
class TranscriptionContext:
def __init__(self, name=None):
self.name = name if name is not None else ""
self.context = {}
self.graphs = {}
# TF loops are represented as functions, so nested loops becomes
# stacked functions. Stacked functions are translated to nested
# blocks in Program, like
#
# while_loop(loop_vars=(%a, %b))
# cond_block1(%a.x, %b.x) {
# ...some ops
# } -> (%bool_var1)
# body_block1(%a.x, %b.x) {
# %ret_axx = while_loop(loop_vars=(%a.x,))
# cond_block2(%a.x.x) {
# ...some ops
# } -> (%bool_var2)
# body_block2(%a.x.x) {
# ...some ops
# } -> (%new_a.x.x)
# } -> (%ret_axx)
# ....some ops using %ret_a
# } -> (%ret_ax, %ret_bx)
#
# During the translation of cond_block2, we'd have func_input_stack
#
# (%a.x.x,)
# (%a.x, %b.x)
#
# where [%a.x.x] would be unstacked once cond_block2 is done.
self.func_input_stack = [] # list of tuple[Var]
def add(self, tf_name, ssa_vars, is_new_var=True):
"""
ssa_vars: list[Var] / tuple[Var] (multiple outputs) or
Var (single_output)
is_new_var: True if ssa_vars are newly created for tf_name.
"""
if tf_name in self.context:
# Overriding allows us to translate the while_loop body twice (which is
# needed to figure out shape changes across iterations).
msg = "TF var %s is added again. Overriding previous value"
logging.info(msg % tf_name)
if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name:
msg = (
"MIL op's name ({}) does not match TensorFlow's node name ({})."
" Warning: Node added to context must have the same name as the name passed to context."
)
raise ValueError(msg.format(tf_name, ssa_vars.name))
self.context[tf_name] = ssa_vars
def add_graph(self, graph_name, graph):
self.graphs[graph_name] = graph
def get_graph(self, graph_name):
if graph_name not in self.graphs:
msg = "Graph '{}' not found in: {}"
raise KeyError(msg.format(graph_name, list(self.graphs.keys())))
return self.graphs[graph_name]
def stack_func_inputs(self, inputs):
self.func_input_stack.append(inputs)
def unstack_func_inputs(self):
if len(self.func_input_stack) == 0:
raise ValueError("No func input available")
self.func_input_stack.pop()
def get_func_inputs(self):
if len(self.func_input_stack) == 0:
raise ValueError("No func input available")
return self.func_input_stack[-1]
def __getitem__(self, tf_name):
if tf_name not in self.context:
msg = "TF var {} not found in context {}"
raise KeyError(msg.format(tf_name, self.name))
return self.context[tf_name]
def __contains__(self, tf_name):
return tf_name in self.context
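# Minimal usage sketch of TranscriptionContext (hypothetical names):
#   ctx = TranscriptionContext("main")
#   ctx.add("Placeholder", ssa_var)   # map TF node name -> MIL Var
#   assert "Placeholder" in ctx
#   same_var = ctx["Placeholder"]     # KeyError if the name is unknown
# Note that for a single Var, its .name must match the TF name passed to
# add(), otherwise add() raises ValueError (see above).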
class TFConverter:
def __init__(self, tfssa, inputs=None, outputs=None, **kwargs):
"""
tfssa: TensorFlow IR.
inputs: list of TensorType or ImageType, optional, defaults to None.
outputs: list of str or str, optional, defaults to None.
A list of names of the output nodes or a str for single output name.
If None, the converter will try to extract the output information from
TensorFlow model.
"""
self.tfssa = tfssa
self.global_type = {}
self.inputs = None
main_func = tfssa.functions["main"]
graph = main_func.graph
# Filter the inputs to only Placeholder names
tf_placeholder_names = [n for n in graph if graph[n].op == "Placeholder"]
placeholder_names = []
if inputs is not None:
# Check inputs format
if not isinstance(inputs, (list, tuple)):
raise ValueError(
"Type of inputs should be list or tuple, got {} instead.".format(
type(inputs)
)
)
if not all([isinstance(i, InputType) for i in inputs]):
raise ValueError(
"Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format(
[type(i) for i in inputs]
)
)
# Special case: if there's only 1 input and 1 placeholder, we match them.
if len(tf_placeholder_names) == 1 and len(inputs) == 1:
if inputs[0].name is None:
inputs[0].name = tf_placeholder_names[0]
# Filter out those inputs which are not in tf_placeholder_names.
inputs = [x for x in inputs if x.name in tf_placeholder_names]
# We fill in shapes for user-specified inputs that don't have a shape.
for inp in inputs:
# Check inputs existence
if inp.name is None:
raise ValueError(
"Unable to infer input's name or input name was not provided"
)
if inp.name not in tf_placeholder_names:
raise ValueError(
"Input ({}) provided is not found in the given TensorFlow graph. Placeholders in the graph are: {}".format(
inp.name, tf_placeholder_names
)
)
if inp.shape is None:
shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name)
# _get_shaping_class does not accept -1 or None dimension.
shape = [get_new_symbol() if s is None or s == -1 else s \
for s in shape]
inp.shape = _get_shaping_class(shape)
# Extract placeholders that users didn't specify.
user_input_names = [inp.name for inp in inputs]
for name in tf_placeholder_names:
if name not in user_input_names:
placeholder_names.append(name)
else:
inputs = []
placeholder_names = tf_placeholder_names
# name -> (shape, mil_type) mapping. shape has type list[int]
added_inputs = {}
for inp in main_func.inputs:
if inp not in placeholder_names:
continue
node = graph[inp]
dtype = node.attr['dtype']
shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp)
shape = [get_new_symbol() if s is None or s == -1 else s \
for s in shape]
inputs.append(TensorType(name=inp, shape=shape, dtype=dtype))
added_inputs[inp] = (shape, dtype)
if len(added_inputs) > 0:
logging.info(
"Adding Input not specified by users: '{}'".format(
added_inputs)
)
for idx, inp in enumerate(inputs):
# We set the default image format in TF as NHWC, since NHWC is used
# for TF unless GPU is specified as device.
if isinstance(inp, ImageType) and inputs[idx].channel_first is None:
inputs[idx].channel_first = False
self.inputs = tuple(inputs)
for inputtype in self.inputs:
if not isinstance(inputtype.shape, InputShape):
continue
if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]):
continue
node = graph[inputtype.name]
shape = [-1 if is_symbolic(s) else s for s in inputtype.shape.shape]
node.attr["_output_shapes"] = [shape] # list of length 1
# infer outputs if not provided
self._validate_outputs(tfssa, outputs)
outputs = main_func.outputs if outputs is None else outputs
outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
outputs = [x if isinstance(x, str) else x.name for x in outputs]
self.outputs = outputs
# We would like a stack so that we run conversion sequentially.
self.graph_stack = self._get_stack(tfssa, root="main")
self.context = TranscriptionContext()
self.tensorflow_passes = tensorflow_passes
def _get_placeholder_shape_from_tf_graph(self, tfgraph, name):
error_message = "Unable to determine the shape of input: {}." \
" Please provide its shape during conversion, using \n" \
"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])".format(name, name)
if tfgraph[name].attr.get("shape", None) is not None:
shape = tfgraph[name].attr["shape"]
elif tfgraph[name].attr.get("_output_shapes", None) is not None:
shape = tfgraph[name].attr["_output_shapes"][0]
if shape is None:
raise ValueError(error_message)
else:
raise ValueError(error_message)
return shape
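# For reference, the user-facing call suggested in error_message above looks
# roughly like this (sketch; the shape is a hypothetical example):
#   import coremltools as ct
#   mlmodel = ct.convert(
#       tf_model, inputs=[ct.TensorType(name='input', shape=(1, 224, 224, 3))])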
def _get_stack(self, tfssa, root="main"):
# We're trying to get an order in which to loop through the graphs.
# This is NOT necessarily a DAG.
dep = {x: [] for x in tfssa.functions}
for fname in tfssa.functions:
for node in tfssa.functions[fname].graph.values():
func_x, func_y = None, None
if node.op == "while":
func_x = node.attr["body_function"]
func_y = node.attr["cond_function"]
if func_x and fname not in dep[func_x]:
dep[func_x].append(fname)
if func_y and fname not in dep[func_y]:
dep[func_y].append(fname)
assert len(dep[root]) == 0
graph_stack = simple_topsort(dep)
return graph_stack
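# Illustrative dependency map (hypothetical function names): a single while
# loop in 'main' yields
#   dep = {'main': [], 'while_body': ['main'], 'while_cond': ['main']}
# and simple_topsort(dep) places 'main' first on the stack, which convert()
# asserts later.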
@staticmethod
def _get_tensor_name(tensor):
ret = None
if isinstance(tensor, str):
ret = tensor
else:
ret = tensor.name
return ret.split(":")[0]
def _validate_outputs(self, tfssa, outputs):
if outputs is None:
return
outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
output_nodes = []
for f in tfssa.functions.values():
output_nodes += list(f.outputs)
all_nodes = []
for f in tfssa.functions.values():
all_nodes += list(f.graph.keys())
for n in outputs:
if self._get_tensor_name(n) not in output_nodes + all_nodes:
raise KeyError('Output node name "{}" does not exist.'.format(n))
def check_placeholder_output(self, prog, outputs_name):
"""
Handle the cases where placeholder is output.
There is a case where the program is like
main(%Placeholder: (5,fp32)) {
block3() {
} -> (%Placeholder)
}
But self.outputs = ["Placeholder:0"]
We need to change the block output to Placeholder:0 by inserting an identity
"""
block = prog["main"]
input_name = [x.name for x in list(block.inputs.values())]
with block:
new_outputs = []
for output, output_name in zip(block.outputs, outputs_name):
if output.name not in input_name or output.name == output_name:
new_output = output
else:
new_output = mb.identity(x=output, name=output_name)
new_outputs.append(new_output)
block.set_outputs(new_outputs)
def convert_main_graph(self, prog, graph):
func_inputs = {}
for input_type in self.inputs:
func_inputs[input_type.name] = mb.placeholder(
input_type.shape.symbolic_shape, dtype=input_type.dtype)
prog.set_main_input_types(self.inputs)
with Function(func_inputs) as ssa_func:
# Get the input Var
for name in func_inputs.keys():
self.context.add(name, ssa_func.inputs[name])
outputs = convert_graph(self.context, graph, self.outputs)
ssa_func.set_outputs(outputs)
prog.add_function("main", ssa_func)
# check duplicate output
# Note: sometimes two outputs point to the same Var; we should
# create mb.identity for those cases.
block = prog["main"]
with block:
name_counts = {}
new_outputs = [output for output in block.outputs]
for i, v_o in enumerate(block.outputs):
if v_o.name not in name_counts:
name_counts[v_o.name] = 1
else:
name_counts[v_o.name] += 1
new_name = v_o.name + "_duplicate_" + str(name_counts[v_o.name])
x = mb.identity(x=v_o, name=new_name)
new_outputs[i] = x
block.set_outputs(new_outputs)
# Rename outputs to TF's name. This is needed when the last op doesn't
# generate a new Var (e.g., get_tuple, Identity etc.), and thus the
# last Var would have a different name than the last TF op's name.
#
# Example:
#
# TF code:
# x = tf.placeholder(tf.float32, shape=(1,))
# y = tf.placeholder(tf.float32, shape=(1,))
# c = lambda i, j: \
# tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j))
# b = lambda i, j: (tf.add(i, 1), j)
# res = tf.while_loop(c, b, [x, y])
#
# Resulting nodes (excluding the nodes in while loop cond & body):
#
# node name: Placeholder op type: Placeholder inputs: []
# node name: Placeholder_1 op type: Placeholder inputs: []
# node name: make_input_0 op type: make_tuple inputs: ['Placeholder',
# 'Placeholder_1']
# node name: while_0 op type: while inputs: ['make_input_0']
# node name: while/Exit op type: get_tuple inputs: ['while_0']
# node name: while/Exit_1 op type: get_tuple inputs: ['while_0']
#
# Observe that return node `while/Exit` is an output from get_tuple,
# which in our translation simply unpacks a Python tuple of Vars
# ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to
# rename `while_0:0` to `while/Exit` in order for users to find the
# output.
# Note: only rename the output if the output is not Placeholder.
input_names = [x.name for x in self.inputs]
for v_o, out_name in zip(prog["main"].outputs, self.outputs):
if v_o.name != out_name and v_o.name not in input_names:
logging.info(
"Renaming output var: '{}' -> '{}'".format(v_o.name, out_name)
)
v_o.name = out_name
self.check_placeholder_output(prog, self.outputs)
@_profile
def convert(self):
prog = Program()
if len(self.graph_stack) == 0:
raise ValueError("At least one TF function must be present")
if self.graph_stack[0] != "main":
msg = "TF root graph must be named 'main'. Got {}"
raise ValueError(msg.format(self.graph_stack[0]))
graph = self.tfssa.functions["main"].graph
for g_name in self.graph_stack[1:]:
self.context.add_graph(g_name, self.tfssa.functions[g_name].graph)
self.convert_main_graph(prog, graph)
# Apply TF frontend passes on Program. These passes are different
# from passes applied to tfssa.
self.tensorflow_passes(prog)
return prog
| 41.527845
| 123
| 0.58329
| 2,214
| 17,151
| 4.368112
| 0.160795
| 0.023162
| 0.028436
| 0.028953
| 0.210113
| 0.142798
| 0.08479
| 0.063902
| 0.057078
| 0.046117
| 0
| 0.005791
| 0.325404
| 17,151
| 412
| 124
| 41.628641
| 0.830078
| 0.241444
| 0
| 0.133333
| 0
| 0
| 0.085858
| 0.002511
| 0
| 0
| 0
| 0
| 0.003922
| 1
| 0.066667
| false
| 0.015686
| 0.058824
| 0.003922
| 0.168627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1d42a55dc8480f71e72b9866ed7b027a303687
| 34,975
|
py
|
Python
|
moto/dynamodb2/parsing/expressions.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 1
|
2021-12-12T04:23:06.000Z
|
2021-12-12T04:23:06.000Z
|
moto/dynamodb2/parsing/expressions.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 17
|
2020-08-28T12:53:56.000Z
|
2020-11-10T01:04:46.000Z
|
moto/dynamodb2/parsing/expressions.py
|
orenmazor/moto
|
4778377e8ecaf729d26602a2c5202b72c1438503
|
[
"Apache-2.0"
] | 1
|
2021-07-06T22:44:47.000Z
|
2021-07-06T22:44:47.000Z
|
import logging
from abc import abstractmethod
import abc
import six
from collections import deque
from moto.dynamodb2.parsing.ast_nodes import (
UpdateExpression,
UpdateExpressionSetClause,
UpdateExpressionSetActions,
UpdateExpressionSetAction,
UpdateExpressionRemoveActions,
UpdateExpressionRemoveAction,
UpdateExpressionPath,
UpdateExpressionValue,
UpdateExpressionGroupedValue,
UpdateExpressionRemoveClause,
ExpressionPathDescender,
ExpressionSelector,
ExpressionAttribute,
ExpressionAttributeName,
ExpressionAttributeValue,
ExpressionValueOperator,
UpdateExpressionFunction,
UpdateExpressionAddClause,
UpdateExpressionAddActions,
UpdateExpressionAddAction,
UpdateExpressionDeleteAction,
UpdateExpressionDeleteActions,
UpdateExpressionDeleteClause,
)
from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression
from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer
class NestableExpressionParserMixin(object):
"""
For nodes that can be nested in themselves (recursive). Take for example UpdateExpression's grammar:
UpdateExpression => UpdateExpressionClause*
UpdateExpression => UpdateExpressionClause* UpdateExpression
If we consider it of structure
NestableExpression => TargetClause*
NestableExpression => TargetClause* NestableExpression
This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern.
This approach is taken since it preserves the ordering of the Nodes exactly as the corresponding
tokens appeared in the originating expression.
"""
def __init__(self, *args, **kwargs):
self.target_clauses = deque()
def _parse_target_clause(self, factory_class):
"""
Args:
factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser
Returns:
"""
logging.debug(
"Move token pos {pos} to continue parsing with specific factory class {fc}".format(
pos=self.token_pos, fc=factory_class.__class__.__name__
)
)
# noinspection PyProtectedMember
ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos()
self.target_clauses.append(ast)
logging.debug(
"Continue where previous parsing ended {token_pos}".format(
token_pos=token_pos
)
)
self.token_pos = token_pos
@abstractmethod
def _initializer_args(self):
"""
Get the arguments of the initializer. This is implemented by the calling class. See ExpressionParser for an
example.
Returns:
dict: A dictionary of the initializer arguments
"""
@classmethod
@abstractmethod
def _nestable_class(cls):
"""
Get the class of the Node that will be created and nested. For the example in the class
docstring this would be UpdateExpression.
Returns:
class: The class of the Nodes that will be created.
"""
def _create_node(self):
"""
target_clauses has the nodes in the order they were encountered. Go through them backwards and build the tree bottom up.
This way left-deep-descending traversal will process nodes in order.
Continuing the example of an UpdateExpression:
For example SET a=3 REMOVE b
UpdateExpression
/ \
SET a=3 UpdateExpression
|
REMOVE b
self.target_clauses looks like: ( SET a=3 >> REMOVE b )
Returns:
moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory.
"""
assert len(self.target_clauses) > 0, "No nodes for {cn}".format(
cn=self.__class__.__name__
)
target_node = self._nestable_class()(children=[self.target_clauses.pop()])
while len(self.target_clauses) > 0:
target_node = self._nestable_class()(
children=[self.target_clauses.pop(), target_node]
)
return target_node
@six.add_metaclass(abc.ABCMeta)
class ExpressionParser:
"""Abstract class"""
def __init__(self, expression_token_list, token_pos=0):
"""
Args:
expression_token_list:
token_pos(int): Location where parsing is
"""
self.token_list = expression_token_list
self.token_pos = token_pos
def _initializer_args(self):
return {"expression_token_list": self.token_list, "token_pos": self.token_pos}
@abstractmethod
def _parse(self):
"""
Start parsing the token_list from token_pos for the factory type.
Returns:
moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract syntax tree
"""
@classmethod
def is_possible_start(cls, token):
return token is not None and cls._is_possible_start(token)
@classmethod
@abstractmethod
def _is_possible_start(cls, token):
"""
Args:
token(moto.dynamodb2.tokens.Token):
Returns:
bool: True if token is a possible start for entries processed by `cls`
"""
def _parse_with_pos(self):
"""
Start parsing the token_list from token_pos for the factory type and also return the resulting token_pos.
Returns:
(ast, token_pos): tuple of AST which is root node of resulting abstract syntax tree and token_pos is the
position in the tokenlist.
"""
return self._parse(), self.token_pos
def parse(self):
return self._parse()
def get_next_token_type(self):
"""
Get the type of the next token to be processed
Returns:
str: Token type or None if no more next token
"""
try:
return self.get_next_token().type
except AttributeError:
return None
def get_next_token(self):
"""
Get the next token to be processed
Returns:
moto.dynamodb2.tokens.Token: or None if no more next token
"""
try:
return self.token_list[self.token_pos]
except IndexError:
return None
def get_next_token_value(self):
"""
Get the value of the next token to be processed
Returns:
str: value or None if no more next token
"""
try:
return self.get_next_token().value
except AttributeError:
return None
def is_at_end(self):
"""Return boolean indicating whether we are at end of the parsing"""
return self.token_pos == len(self.token_list)
def is_at_start(self):
"""Return boolean indicating whether we are at start of the parsing"""
return self.token_pos == 0
def get_last_token_value(self):
"""Get the last token that was correctly parsed or return empty string"""
if self.token_pos > 0:
return self.token_list[self.token_pos - 1].value
else:
return ""
def get_last_token_type(self):
"""Get the last token type that was correctly parsed or return None"""
if self.token_pos > 0:
return self.token_list[self.token_pos - 1].type
else:
return None
def get_2nd_last_token_value_if_last_was_whitespace(self):
"""Get the 2nd last token that was correctly parsed if last one was whitespace or return empty string"""
if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE:
return self.token_list[self.token_pos - 2].value
else:
return ""
def get_following_token_value(self):
"""Get the token value after the one that is being parsed or empty string if non existent."""
try:
return self.token_list[self.token_pos + 1].value
except IndexError:
return ""
def get_following_token_type(self):
"""Get the token type after the one that is being parsed or None if non existent."""
try:
return self.token_list[self.token_pos + 1].type
except IndexError:
return None
def get_2nd_following_token_value_if_following_was_whitespace(self):
"""Get the 2nd following token that was correctly parsed if 1st one was whitespace or return empty string"""
if self.get_following_token_type() == Token.WHITESPACE:
try:
return self.token_list[self.token_pos + 2].value
except IndexError:
return ""
else:
return ""
def skip_white_space(self):
try:
while self.get_next_token_type() == Token.WHITESPACE:
self.token_pos += 1
except IndexError:
assert self.token_pos > 0, "We should always have positive indexes"
logging.debug("We are out of range so end is reached")
def process_token_of_type(self, token_type):
"""
Make sure the next token is of type `token_type`; if not, raise an unexpected-token exception.
Args:
token_type: A token type
Returns:
str: The value if the token is of type `token_type`
"""
if self.get_next_token_type() == token_type:
token_value = self.get_next_token_value()
self.goto_next_significant_token()
return token_value
else:
self.raise_unexpected_token()
def goto_next_significant_token(self):
"""Continue past current token and skip all whitespaces"""
self.token_pos += 1
self.skip_white_space()
def raise_unexpected_token(self):
if self.is_at_end():
problematic_token = "<EOF>"
problematic_token_in_near = ""
else:
problematic_token_in_near = problematic_token = self.get_next_token_value()
near = "".join(
[
self.get_2nd_last_token_value_if_last_was_whitespace(),
self.get_last_token_value(),
problematic_token_in_near,
self.get_following_token_value(),
self.get_2nd_following_token_value_if_following_was_whitespace(),
]
)
raise InvalidTokenException(problematic_token, near)
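# Behavioural sketch: the 'near' string stitches together up to two
# previously parsed tokens and up to two upcoming tokens around the offending
# one (skipping at most one whitespace token on each side), so the
# InvalidTokenException carries a little surrounding context.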
class NestableBinExpressionParser(ExpressionParser):
"""
For nodes that can be nested in themselves (recursive) but with an operation. Take for example
UpdateExpressionValue's grammar:
Value => Operand*
Value => Operand* + Value
Value => Operand* - Value
If we consider it of structure
NestableBinExpression => TargetClause*
NestableBinExpression => TargetClause* BinOp NestableBinExpression
This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern.
This approach is taken since it preserves the ordering of the Nodes exactly as the corresponding
tokens appeared in the originating expression.
"""
def __init__(self, *args, **kwargs):
super(NestableBinExpressionParser, self).__init__(*args, **kwargs)
self.target_nodes = deque()
def _parse_target_clause(self, factory_class):
"""
Args:
factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser
Returns:
"""
# noinspection PyProtectedMember
ast, self.token_pos = factory_class(
**self._initializer_args()
)._parse_with_pos()
self.target_nodes.append(ast)
logging.debug(
"Continue where previous parsing ended {token_pos}".format(
token_pos=self.token_pos
)
)
def _parse(self):
self._parse_target_clause(self._operand_factory_class())
while self._binop_factory_class().is_possible_start(self.get_next_token()):
self._parse_target_clause(self._binop_factory_class())
if self._operand_factory_class().is_possible_start(self.get_next_token()):
self._parse_target_clause(self._operand_factory_class())
else:
self.raise_unexpected_token()
return self._create_node()
@abstractmethod
def _operand_factory_class(self):
"""
Get the Parser class of the Operands for the Binary operations/actions.
Returns:
class:
"""
@abstractmethod
def _binop_factory_class(self):
"""
Get a factory that gets the possible binary operation.
Returns:
class: A class extending ExpressionParser
"""
def _create_node(self):
"""
target_nodes has the nodes in the order they were encountered. Go through them forward and build the tree bottom up.
For simplicity this docstring will use Operand Node rather than the specific node.
This way left-deep-descending traversal will process nodes in order.
Continuing the example of an UpdateExpressionValue:
For example value => a + :val - :val2
UpdateExpressionValue
/ | \
UpdateExpressionValue BinOp Operand
/ | | | |
UpdateExpressionValue BinOp Operand - :val2
/ | |
Operand + :val
|
a
self.target_nodes looks like: ( a >> + >> :val >> - >> :val2 )
Returns:
moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory.
"""
if len(self.target_nodes) == 1:
return UpdateExpressionValue(children=[self.target_nodes.popleft()])
else:
target_node = UpdateExpressionValue(
children=[
self.target_nodes.popleft(),
self.target_nodes.popleft(),
self.target_nodes.popleft(),
]
)
while len(self.target_nodes) >= 2:
target_node = UpdateExpressionValue(
children=[
target_node,
self.target_nodes.popleft(),
self.target_nodes.popleft(),
]
)
assert len(self.target_nodes) == 0
return target_node
class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin):
"""
Parser to create update expressions
"""
@classmethod
def _sub_factories(cls):
return [
UpdateExpressionSetClauseParser,
UpdateExpressionAddClauseParser,
UpdateExpressionDeleteClauseParser,
UpdateExpressionRemoveClauseParser,
]
@classmethod
def _is_possible_start(cls, token):
pass
def __init__(self, *args, **kwargs):
super(UpdateExpressionParser, self).__init__(*args, **kwargs)
NestableExpressionParserMixin.__init__(self)
@classmethod
def _nestable_class(cls):
return UpdateExpression
def _parse_expression_clause(self, factory_class):
return self._parse_target_clause(factory_class)
def _parse_by_a_subfactory(self):
for sub_factory in self._sub_factories():
if sub_factory.is_possible_start(self.get_next_token()):
self._parse_expression_clause(sub_factory)
return True
return False
def _parse(self):
"""
UpdateExpression is the top-most node, so parsing it is expected to consume the token list to its end.
"""
while True:
self.skip_white_space()
if self.is_at_end():
logging.debug("End reached")
break
elif self._parse_by_a_subfactory():
continue
else:
self.raise_unexpected_token()
return self._create_node()
@classmethod
def make(cls, expression_str):
token_list = ExpressionTokenizer.make_list(expression_str)
return cls(token_list).parse()
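# Hypothetical usage sketch: parsing a full update expression in one call,
#   ast = UpdateExpressionParser.make("SET a = :val REMOVE b")
# tokenizes the string and returns the root UpdateExpression node with a SET
# clause and a REMOVE clause as descendants.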
class UpdateExpressionSetClauseParser(ExpressionParser):
"""
UpdateExpressionSetClause => SET SetActions
"""
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE and token.value.upper() == "SET"
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionSetActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionSetClause(children=[ast])
class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin):
"""
Abstract base parser for a sequence of update-expression actions (e.g. SetActions).
"""
def __init__(self, *args, **kwargs):
super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs)
NestableExpressionParserMixin.__init__(self)
@classmethod
def _is_possible_start(cls, token):
raise RuntimeError(
"{class_name} cannot be identified by the next token.".format(
class_name=cls._nestable_class().__name__
)
)
@classmethod
@abstractmethod
def _nestable_class(cls):
return UpdateExpressionSetActions
@classmethod
@abstractmethod
def _nested_expression_parser_class(cls):
"""Returns the parser for the query part that creates the nested nodes"""
def _parse(self):
"""
The actions list is inside the expression so it can be followed by other clauses. Process actions one by
one until no more actions of this type follow.
"""
self.skip_white_space()
while self._nested_expression_parser_class().is_possible_start(
self.get_next_token()
):
self._parse_target_clause(self._nested_expression_parser_class())
self.skip_white_space()
if self.get_next_token_type() == Token.COMMA:
self.goto_next_significant_token()
else:
break
if len(self.target_clauses) == 0:
logging.debug(
"Didn't encounter a single {nc} in {nepc}.".format(
nc=self._nestable_class().__name__,
nepc=self._nested_expression_parser_class().__name__,
)
)
self.raise_unexpected_token()
return self._create_node()
class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionSetActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionSetActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionSetActions
class UpdateExpressionSetActionParser(ExpressionParser):
"""
SetAction => Path = Value
So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value.
"""
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
"""
UpdateExpressionSetActionParser only gets called when expecting a SetAction. So we should be
aggressive about raising invalid Tokens. We can thus do the following:
1) Process path
2) skip whitespace if there is any
3) Process equal-sign token
4) skip whitespace if there is any
5) Process value
"""
path, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
self.process_token_of_type(Token.EQUAL_SIGN)
self.skip_white_space()
value, self.token_pos = UpdateExpressionValueParser(
**self._initializer_args()
)._parse_with_pos()
return UpdateExpressionSetAction(children=[path, value])
class UpdateExpressionPathParser(ExpressionParser):
"""
Paths are selectors within items that specify a part of an Item. DynamoDB does not impose many
restrictions on the data it stores, but it does impose stricter restrictions on how paths are
represented in UpdateExpressions.
"""
def __init__(self, *args, **kwargs):
super(UpdateExpressionPathParser, self).__init__(*args, **kwargs)
self.path_nodes = []
@classmethod
def _is_possible_start(cls, token):
"""
Args:
token(Token): the token to be checked
Returns:
bool: Whether the token could be the start of an UpdateExpressionPath
"""
if token.type == Token.ATTRIBUTE_NAME:
return True
elif token.type == Token.ATTRIBUTE and token.value.upper() != "REMOVE":
# We have to make sure the REMOVE keyword is not mistaken for an attribute.
return True
return False
def _parse(self):
return self.process_path()
def process_path(self):
self.parse_path()
return UpdateExpressionPath(children=self.path_nodes)
def parse_path(self):
"""
A path is comprised of:
- Attribute: the name of an attribute as how it is stored which has no special characters
- ATTRIBUTE_NAME: A placeholder that has no special characters except leading # to refer to attributes that
have a name that is not allowed in an UpdateExpression)
- DOT's: These are used to descend into a nested structure. When a DOT is in a path expression it is never part
of an attribute name but always means a descent into a MAP. We will call each descent a path
chain
- SELECTORs: E.g.: [1] These are used to select an element in ordered datatypes like a list.
Whitespaces can be between all these elements that build a path. For SELECTORs it is also allowed to have
whitespaces between brackets and numbers but the number cannot be split up with spaces
Attributes and attribute_names must be separated with DOT's.
Returns:
UpdateExpressionPath:
"""
self.parse_path_chain()
while self.is_next_token_start_of_patch_chain():
self.process_dot()
self.parse_path_chain()
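# Illustrative result (sketch): parsing the path '#name.inner[2]' appends
#   ExpressionAttributeName('#name'), ExpressionPathDescender(),
#   ExpressionAttribute('inner'), ExpressionSelector('2')
# to self.path_nodes before they are wrapped in an UpdateExpressionPath.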
def is_next_token_start_of_patch_chain(self):
return self.get_next_token_type() == Token.DOT
def process_dot(self):
self.path_nodes.append(ExpressionPathDescender())
self.goto_next_significant_token()
def parse_path_chain(self):
self.process_attribute_identifying_token()
self.skip_white_space()
while self.is_next_token_start_of_selector():
self.process_selector()
self.skip_white_space()
def process_attribute_identifying_token(self):
if self.get_next_token_type() == Token.ATTRIBUTE:
self.path_nodes.append(ExpressionAttribute(self.get_next_token_value()))
elif self.get_next_token_type() == Token.ATTRIBUTE_NAME:
self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value()))
else:
self.raise_unexpected_token()
self.goto_next_significant_token()
def is_next_token_start_of_selector(self):
return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET
def process_selector(self):
"""
process_selector is only called when a selector must be processed. So do the following actions:
- skip opening bracket
- skip optional spaces
- read numeric literal
- skip optional spaces
- pass closing bracket
"""
self.process_token_of_type(Token.OPEN_SQUARE_BRACKET)
selector_value = self.process_token_of_type(Token.NUMBER)
self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET)
self.path_nodes.append(ExpressionSelector(selector_value))
class UpdateExpressionValueParser(NestableBinExpressionParser):
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionOperandParser.is_possible_start(token)
def _operand_factory_class(self):
return UpdateExpressionOperandParser
def _binop_factory_class(self):
return UpdateExpressionValueOperatorParser
class UpdateExpressionGroupedValueParser(ExpressionParser):
"""
A grouped value is an Update Expression value clause that is surrounded by round brackets. Each Operand can be
a grouped value by itself.
"""
def _parse(self):
self.process_token_of_type(Token.OPEN_ROUND_BRACKET)
value, self.token_pos = UpdateExpressionValueParser(
**self._initializer_args()
)._parse_with_pos()
self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)
return UpdateExpressionGroupedValue(children=value)
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.OPEN_ROUND_BRACKET
class UpdateExpressionValueOperatorParser(ExpressionParser):
OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN]
@classmethod
def _is_possible_start(cls, token):
return token.type in cls.OPERATION_TOKENS
def _parse(self):
operation_value = self.get_next_token_value()
assert operation_value in self.OPERATION_TOKENS
self.goto_next_significant_token()
return ExpressionValueOperator(operation_value)
class UpdateExpressionOperandParser(ExpressionParser):
"""
Grammar
Operand* => AttributeValue
Operand* => UpdateExpressionFunction
Operand* => Path
Operand* => GroupedValue
"""
@classmethod
def _sub_factories(cls):
return [
UpdateExpressionAttributeValueParser,
UpdateExpressionFunctionParser,
UpdateExpressionPathParser,
UpdateExpressionGroupedValueParser,
]
@classmethod
def _is_possible_start(cls, token):
return any(parser.is_possible_start(token) for parser in cls._sub_factories())
def _parse(self):
for factory in self._sub_factories():
if factory.is_possible_start(self.get_next_token()):
node, self.token_pos = factory(
**self._initializer_args()
)._parse_with_pos()
return node
self.raise_unexpected_token()
class UpdateExpressionAttributeValueParser(ExpressionParser):
def _parse(self):
attr_value = ExpressionAttributeValue(
self.process_token_of_type(Token.ATTRIBUTE_VALUE)
)
return attr_value
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE_VALUE
class UpdateExpressionAttributeValueOrPathParser(ExpressionParser):
def _parse(self):
if UpdateExpressionAttributeValueParser.is_possible_start(
self.get_next_token()
):
token, self.token_pos = UpdateExpressionAttributeValueParser(
**self._initializer_args()
)._parse_with_pos()
else:
token, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
return token
@classmethod
def _is_possible_start(cls, token):
return any(
[
UpdateExpressionAttributeValueParser.is_possible_start(token),
UpdateExpressionPathParser.is_possible_start(token),
]
)
class UpdateExpressionFunctionParser(ExpressionParser):
"""
A helper to process a function of an Update Expression
"""
# Map function to the factories for its elements
FUNCTIONS = {
"if_not_exists": [
UpdateExpressionPathParser,
UpdateExpressionAttributeValueOrPathParser,
],
"list_append": [UpdateExpressionOperandParser, UpdateExpressionOperandParser],
}
@classmethod
def _is_possible_start(cls, token):
"""
Check whether a token is supposed to be a function
Args:
token(Token): the token to check
Returns:
bool: True if token is the start of a function.
"""
if token.type == Token.ATTRIBUTE:
return token.value in cls.FUNCTIONS.keys()
else:
return False
def _parse(self):
function_name = self.get_next_token_value()
if function_name not in self.FUNCTIONS.keys():
# Function names are case sensitive
raise InvalidUpdateExpression(function_name)
self.goto_next_significant_token()
self.process_token_of_type(Token.OPEN_ROUND_BRACKET)
function_elements = [function_name]
function_arguments = self.FUNCTIONS[function_name]
for i, func_elem_factory in enumerate(function_arguments):
func_elem, self.token_pos = func_elem_factory(
**self._initializer_args()
)._parse_with_pos()
function_elements.append(func_elem)
if i + 1 < len(function_arguments):
self.skip_white_space()
self.process_token_of_type(Token.COMMA)
self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)
return UpdateExpressionFunction(children=function_elements)
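# Illustrative parse (sketch): 'if_not_exists(a, :v)' produces an
# UpdateExpressionFunction node whose children are the function name
# 'if_not_exists', the parsed Path for 'a', and the AttributeValue for ':v',
# per the FUNCTIONS map above.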
class UpdateExpressionRemoveClauseParser(ExpressionParser):
"""
UpdateExpressionRemoveClause => REMOVE RemoveActions
"""
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionRemoveActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionRemoveClause(children=[ast])
@classmethod
def _is_possible_start(cls, token):
"""REMOVE is not a keyword"""
return token.type == Token.ATTRIBUTE and token.value.upper() == "REMOVE"
class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionRemoveActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionRemoveActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionRemoveActions
class UpdateExpressionRemoveActionParser(ExpressionParser):
"""
RemoveAction => Path
So we create an UpdateExpressionRemoveAction Node that has a single child: the Path to remove.
"""
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
"""
UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction. So we should be
aggressive about raising invalid Tokens. We can thus do the following:
1) Process path
2) skip whitespace if there are any
"""
path, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
return UpdateExpressionRemoveAction(children=[path])
class UpdateExpressionAddClauseParser(ExpressionParser):
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionAddActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionAddClause(children=[ast])
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE and token.value.upper() == "ADD"
class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionAddActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionAddActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionAddActions
@six.add_metaclass(abc.ABCMeta)
class UpdateExpressionPathValueParser(ExpressionParser):
def _parse_path_and_value(self):
"""
This parser only gets called when expecting a path followed by a value (as in an AddAction or
DeleteAction). So we should be aggressive about raising invalid Tokens. We can thus do the following:
1) Process path
2) skip whitespace if there are any
3) Process a value
4) skip whitespace if there are any
Returns:
[path, value]: A list containing the Path node and the AttributeValue nodes
"""
path, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
value, self.token_pos = UpdateExpressionAttributeValueParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
return [path, value]
class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser):
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
return UpdateExpressionAddAction(children=self._parse_path_and_value())
class UpdateExpressionDeleteClauseParser(ExpressionParser):
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionDeleteActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionDeleteClause(children=[ast])
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE and token.value.upper() == "DELETE"
class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionDeleteActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionDeleteActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionDeleteActions
class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser):
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
return UpdateExpressionDeleteAction(children=self._parse_path_and_value())
| 33.597502
| 120
| 0.651237
| 3,689
| 34,975
| 5.924912
| 0.123069
| 0.019399
| 0.026765
| 0.018301
| 0.521069
| 0.464657
| 0.386146
| 0.349408
| 0.314819
| 0.253237
| 0
| 0.002219
| 0.27857
| 34,975
| 1,040
| 121
| 33.629808
| 0.864022
| 0.264989
| 0
| 0.480836
| 0
| 0
| 0.01886
| 0.00088
| 0
| 0
| 0
| 0
| 0.013937
| 1
| 0.170732
| false
| 0.001742
| 0.013937
| 0.062718
| 0.38676
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1e3b72b32866f599c7e926ceb63efd29d9c600
| 5,332
|
py
|
Python
|
dftbplus_step/tk_optimization.py
|
molssi-seamm/dftbplus_step
|
e5b9c7462d92c25fc6f27db5e4324b05bb42e224
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T05:14:03.000Z
|
2022-01-24T05:14:03.000Z
|
dftbplus_step/tk_optimization.py
|
molssi-seamm/dftbplus_step
|
e5b9c7462d92c25fc6f27db5e4324b05bb42e224
|
[
"BSD-3-Clause"
] | 10
|
2020-12-16T21:36:37.000Z
|
2022-03-17T01:53:54.000Z
|
dftbplus_step/tk_optimization.py
|
molssi-seamm/dftbplus_step
|
e5b9c7462d92c25fc6f27db5e4324b05bb42e224
|
[
"BSD-3-Clause"
] | 1
|
2022-01-14T15:26:49.000Z
|
2022-01-14T15:26:49.000Z
|
# -*- coding: utf-8 -*-
"""The graphical part of a DFTB+ Optimization node"""
import logging
import tkinter as tk
import tkinter.ttk as ttk
import dftbplus_step
logger = logging.getLogger(__name__)
class TkOptimization(dftbplus_step.TkEnergy):
def __init__(
self,
tk_flowchart=None,
node=None,
canvas=None,
x=120,
y=20,
w=200,
h=50,
my_logger=logger,
keyword_metadata=None,
):
"""Initialize the graphical Tk DFTB+ optimization step
Keyword arguments:
"""
self.results_widgets = []
super().__init__(
tk_flowchart=tk_flowchart,
node=node,
canvas=canvas,
x=x,
y=y,
w=w,
h=h,
my_logger=my_logger,
keyword_metadata=keyword_metadata,
)
def right_click(self, event):
"""Probably need to add our dialog..."""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def create_dialog(
self, title="Edit DFTB+ Optimization Step", calculation="optimization"
):
"""Create the dialog!"""
self.logger.debug("Creating the dialog")
super().create_dialog(title=title, calculation=calculation)
# Create all the widgets
P = self.node.parameters
# Frame to isolate widgets
opt_frame = self["optimization frame"] = ttk.LabelFrame(
self["frame"],
borderwidth=4,
relief="sunken",
text="Optimization Parameters",
labelanchor="n",
padding=10,
)
for key in dftbplus_step.OptimizationParameters.parameters:
self[key] = P[key].widget(opt_frame)
self.logger.debug("Finished creating the dialog")
def reset_dialog(self, widget=None):
super().reset_dialog()
row = 0
self["optimization frame"].grid(row=row, column=1, sticky=tk.EW)
row += 1
# And the widgets in our frame
self.reset_optimization_frame()
return row
def reset_optimization_frame(self):
"""Layout the optimization frame according to the current values.
SD CG gDIIS LBFGS FIRE
------------------ ------------------- ------------------- ------------------- --------
MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep
MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent
MaxSteps MaxSteps MaxSteps MaxSteps
OutputPrefix OutputPrefix OutputPrefix OutputPrefix
AppendGeometries AppendGeometries AppendGeometries AppendGeometries
Constraints Constraints Constraints Constraints
LatticeOpt LatticeOpt LatticeOpt LatticeOpt
FixAngles FixAngles FixAngles FixAngles
FixLengths
Isotropic Isotropic Isotropic Isotropic
Pressure Pressure Pressure Pressure
MaxAtomStep MaxAtomStep MaxAtomStep
MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep
ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly
StepSize Alpha Memory
Generations LineSearch
""" # noqa: E501
frame = self["optimization frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
method = self["optimization method"].get()
widgets = []
widgets1 = []
row = 0
w = self["optimization method"]
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)
widgets.append(w)
row += 1
if method == "Steepest descents":
w = self["StepSize"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
elif "gDIIS" in method:
w = self["Alpha"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
w = self["Generations"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
elif "LBFGS" in method:
w = self["Memory"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
w = self["LineSearch"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
for widget in (
"MaxForceComponent",
"MaxSteps",
"MaxAtomStep",
"stop_if_scc_fails",
):
w = self[widget]
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)
widgets.append(w)
row += 1
return row
| 32.120482
| 100
| 0.515941
| 498
| 5,332
| 5.427711
| 0.293173
| 0.020718
| 0.029597
| 0.047355
| 0.145764
| 0.145764
| 0.145764
| 0.145764
| 0.135775
| 0.135775
| 0
| 0.01346
| 0.386909
| 5,332
| 165
| 101
| 32.315152
| 0.813399
| 0.317329
| 0
| 0.278846
| 0
| 0
| 0.098837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048077
| false
| 0
| 0.038462
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c1fe311f29bf7609a66d633ca361b9c555f8538
| 3,512
|
py
|
Python
|
skywalking/client/grpc.py
|
cooolr/skywalking-python
|
42176ff4b732000f2a75eac1affee2a681379df7
|
[
"Apache-2.0"
] | null | null | null |
skywalking/client/grpc.py
|
cooolr/skywalking-python
|
42176ff4b732000f2a75eac1affee2a681379df7
|
[
"Apache-2.0"
] | null | null | null |
skywalking/client/grpc.py
|
cooolr/skywalking-python
|
42176ff4b732000f2a75eac1affee2a681379df7
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grpc
from skywalking.protocol.common.Common_pb2 import KeyStringValuePair
from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub
from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub
from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties
from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub
from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery
from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub
from skywalking import config
from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \
LogDataReportService
from skywalking.command import command_service
from skywalking.loggings import logger
from skywalking.profile import profile_task_execution_service
class GrpcServiceManagementClient(ServiceManagementClient):
def __init__(self, channel: grpc.Channel):
self.service_stub = ManagementServiceStub(channel)
def send_instance_props(self):
self.service_stub.reportInstanceProperties(InstanceProperties(
service=config.service_name,
serviceInstance=config.service_instance,
properties=[KeyStringValuePair(key='language', value='Python')],
))
def send_heart_beat(self):
logger.debug(
'service heart beats, [%s], [%s]',
config.service_name,
config.service_instance,
)
self.service_stub.keepAlive(InstancePingPkg(
service=config.service_name,
serviceInstance=config.service_instance,
))
class GrpcTraceSegmentReportService(TraceSegmentReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = TraceSegmentReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
class GrpcLogDataReportService(LogDataReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = LogReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
class GrpcProfileTaskChannelService(ProfileTaskChannelService):
def __init__(self, channel: grpc.Channel):
self.task_stub = ProfileTaskStub(channel)
def do_query(self):
query = ProfileTaskCommandQuery(
service=config.service_name,
serviceInstance=config.service_instance,
lastCommandTime=profile_task_execution_service.get_last_command_create_time()
)
commands = self.task_stub.getProfileTaskCommands(query)
command_service.receive_command(commands)
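# Illustrative usage sketch (an assumption, not part of this module); the backend
# address is hypothetical and would normally come from skywalking's config:
#   channel = grpc.insecure_channel('127.0.0.1:11800')
#   management = GrpcServiceManagementClient(channel)
#   management.send_instance_props()
#   management.send_heart_beat()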
| 39.909091
| 110
| 0.768508
| 379
| 3,512
| 6.955145
| 0.377309
| 0.063733
| 0.058422
| 0.027314
| 0.23824
| 0.23824
| 0.174507
| 0.149469
| 0.081184
| 0.048558
| 0
| 0.003759
| 0.166856
| 3,512
| 87
| 111
| 40.367816
| 0.897129
| 0.214123
| 0
| 0.296296
| 0
| 0
| 0.016405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.240741
| 0
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c228e2ac32c2ad15f711401f0894056b88a3776
| 1,388
|
py
|
Python
|
ng/distributions/Distribution.py
|
forons/noise-generator
|
033906165adaf6e620c03bf0b91f19b6d9890cf0
|
[
"MIT"
] | null | null | null |
ng/distributions/Distribution.py
|
forons/noise-generator
|
033906165adaf6e620c03bf0b91f19b6d9890cf0
|
[
"MIT"
] | null | null | null |
ng/distributions/Distribution.py
|
forons/noise-generator
|
033906165adaf6e620c03bf0b91f19b6d9890cf0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from enum import Enum
from .NormalDist import NormalDist
from .UniformDist import UniformDist
class Distribution(Enum):
UNIFORM = 0
GAUSSIAN = 1
POISSON = 2
@staticmethod
def determine_distribution(distribution, distribution_params):
distribution_upper = distribution.upper()
        if distribution_upper not in Distribution.__members__:
raise IndexError('Distribution not supported `{}`. Try one of: {}'.format(
distribution, [(elem.value, elem.name) for elem in Distribution]))
if Distribution[distribution_upper] == Distribution.UNIFORM:
if not distribution_params:
distribution_params = 0.5
return UniformDist(rate=float(distribution_params))
if Distribution[distribution_upper] == Distribution.GAUSSIAN:
if not distribution_params:
distribution_params = [0., 1.]
return NormalDist(loc=float(distribution_params[0]),
scale=float(distribution_params[1]))
        if Distribution[distribution_upper] is Distribution.POISSON:
            # POISSON is declared in the enum but not implemented yet;
            # fall through to the unsupported-distribution error below.
            pass
raise IndexError('Distribution not supported `{}`. Try one of: {}'.format(
distribution, [(elem.value, elem.name) for elem in Distribution]))
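# Illustrative usage sketch (not part of the module):
#   Distribution.determine_distribution('uniform', '0.3')    # -> UniformDist(rate=0.3)
#   Distribution.determine_distribution('gaussian', [0, 2])  # -> NormalDist(loc=0.0, scale=2.0)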
| 42.060606
| 92
| 0.659942
| 142
| 1,388
| 6.34507
| 0.352113
| 0.159822
| 0.160932
| 0.103219
| 0.417314
| 0.321865
| 0.321865
| 0.228635
| 0.228635
| 0.228635
| 0
| 0.010557
| 0.24928
| 1,388
| 32
| 93
| 43.375
| 0.854127
| 0.03098
| 0
| 0.222222
| 0
| 0
| 0.069993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0.037037
| 0.148148
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c22a7a412610e81fee1ef9b39c31356e4fa70c7
| 258
|
py
|
Python
|
test/rename.py
|
Riteme/test
|
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
|
[
"BSD-Source-Code"
] | 3
|
2018-08-30T09:43:20.000Z
|
2019-12-03T04:53:43.000Z
|
test/rename.py
|
Riteme/test
|
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
|
[
"BSD-Source-Code"
] | null | null | null |
test/rename.py
|
Riteme/test
|
b511d6616a25f4ae8c3861e2029789b8ee4dcb8d
|
[
"BSD-Source-Code"
] | null | null | null |
import os
import sys
filename = sys.argv[1]
from_id = int(sys.argv[2])
to_id = int(sys.argv[3])
for i in range(from_id, to_id + 1):
sys.system("mv {0}.in{1} {0}{1}.in".format(filename, i))
sys.system("mv {0}.out{1} {0}{1}.out".format(filename, i))
| 23.454545
| 62
| 0.624031
| 53
| 258
| 2.962264
| 0.377358
| 0.133758
| 0.101911
| 0.152866
| 0.165605
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0.147287
| 258
| 10
| 63
| 25.8
| 0.659091
| 0
| 0
| 0
| 0
| 0
| 0.178295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c23f93517014abc612473feea3755466fd55cec
| 683
|
py
|
Python
|
dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py
|
kozo2/dash-docs
|
5140cfd1fda439233e8b95e2443332a32a2453f5
|
[
"MIT"
] | 1
|
2021-04-11T03:08:43.000Z
|
2021-04-11T03:08:43.000Z
|
dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py
|
kozo2/dash-docs
|
5140cfd1fda439233e8b95e2443332a32a2453f5
|
[
"MIT"
] | null | null | null |
dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py
|
kozo2/dash-docs
|
5140cfd1fda439233e8b95e2443332a32a2453f5
|
[
"MIT"
] | null | null | null |
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Textarea(
id='textarea-example',
value='Textarea content initialized\nwith multiple lines of text',
style={'width': '100%', 'height': 300},
),
html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'})
])
@app.callback(
Output('textarea-example-output', 'children'),
[Input('textarea-example', 'value')]
)
def update_output(value):
return 'You have entered: \n{}'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
| 26.269231
| 76
| 0.682284
| 87
| 683
| 5.149425
| 0.574713
| 0.133929
| 0.075893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 0.165447
| 683
| 25
| 77
| 27.32
| 0.775439
| 0
| 0
| 0
| 0
| 0
| 0.308931
| 0.06735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0.047619
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c244af15987164d1a6b58af8468dc053923ce6d
| 470
|
py
|
Python
|
eth/vm/forks/petersburg/blocks.py
|
ggs134/py-evm
|
5ad87356181b03c14a2452131f50fe8762127c84
|
[
"MIT"
] | 1,641
|
2017-11-24T04:24:22.000Z
|
2022-03-31T14:59:30.000Z
|
eth/vm/forks/petersburg/blocks.py
|
ggs134/py-evm
|
5ad87356181b03c14a2452131f50fe8762127c84
|
[
"MIT"
] | 1,347
|
2017-11-23T10:37:36.000Z
|
2022-03-20T16:31:44.000Z
|
eth/vm/forks/petersburg/blocks.py
|
ggs134/py-evm
|
5ad87356181b03c14a2452131f50fe8762127c84
|
[
"MIT"
] | 567
|
2017-11-22T18:03:27.000Z
|
2022-03-28T17:49:08.000Z
|
from rlp.sedes import (
CountableList,
)
from eth.rlp.headers import (
BlockHeader,
)
from eth.vm.forks.byzantium.blocks import (
ByzantiumBlock,
)
from .transactions import (
PetersburgTransaction,
)
class PetersburgBlock(ByzantiumBlock):
transaction_builder = PetersburgTransaction
fields = [
('header', BlockHeader),
('transactions', CountableList(transaction_builder)),
('uncles', CountableList(BlockHeader))
]
| 20.434783
| 61
| 0.697872
| 39
| 470
| 8.358974
| 0.564103
| 0.042945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 470
| 22
| 62
| 21.363636
| 0.867021
| 0
| 0
| 0
| 0
| 0
| 0.051064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c249535bfee369b506769f07912c622ac79fe51
| 5,107
|
py
|
Python
|
tests/runner.py
|
crnbaker/MONAI
|
a4b1144efdc27b197410033ae08bd587c8a1634a
|
[
"Apache-2.0"
] | 1
|
2020-12-03T21:28:09.000Z
|
2020-12-03T21:28:09.000Z
|
tests/runner.py
|
crnbaker/MONAI
|
a4b1144efdc27b197410033ae08bd587c8a1634a
|
[
"Apache-2.0"
] | null | null | null |
tests/runner.py
|
crnbaker/MONAI
|
a4b1144efdc27b197410033ae08bd587c8a1634a
|
[
"Apache-2.0"
] | 1
|
2020-06-11T13:03:02.000Z
|
2020-06-11T13:03:02.000Z
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import os
import sys
import time
import unittest
from monai.utils import PerfContext
results: dict = dict()
class TimeLoggingTestResult(unittest.TextTestResult):
"""Overload the default results so that we can store the results."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timed_tests = dict()
def startTest(self, test): # noqa: N802
"""Start timer, print test name, do normal test."""
self.start_time = time.time()
name = self.getDescription(test)
self.stream.write(f"Starting test: {name}...\n")
super().startTest(test)
def stopTest(self, test): # noqa: N802
"""On test end, get time, print, store and do normal behaviour."""
elapsed = time.time() - self.start_time
name = self.getDescription(test)
self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n")
if name in results:
raise AssertionError("expected all keys to be unique")
results[name] = elapsed
super().stopTest(test)
def print_results(results, discovery_time, thresh, status):
# only keep results >= threshold
results = dict(filter(lambda x: x[1] > thresh, results.items()))
if len(results) == 0:
return
print(f"\n\n{status}, printing completed times >{thresh}s in ascending order...\n")
timings = dict(sorted(results.items(), key=lambda item: item[1]))
for r in timings:
if timings[r] >= thresh:
print(f"{r} ({timings[r]:.03}s)")
print(f"test discovery time: {discovery_time:.03}s")
print(f"total testing time: {sum(results.values()):.03}s")
print("Remember to check above times for any errors!")
def parse_args(default_pattern):
parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.")
parser.add_argument(
"-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')"
)
parser.add_argument(
"-p",
action="store",
dest="pattern",
default=default_pattern,
help="Pattern to match tests (default: '%(default)s')",
)
parser.add_argument(
"-t",
"--thresh",
dest="thresh",
default=10.0,
type=float,
help="Display tests longer than given threshold (default: %(default)d)",
)
parser.add_argument(
"-v",
"--verbosity",
action="store",
dest="verbosity",
type=int,
default=1,
help="Verbosity level (default: %(default)d)",
)
parser.add_argument("-q", "--quick", action="store_true", dest="quick", default=False, help="Only do quick tests")
parser.add_argument(
"-f", "--failfast", action="store_true", dest="failfast", default=False, help="Stop testing on first failure"
)
args = parser.parse_args()
print(f"Running tests in folder: '{args.path}'")
if args.pattern:
print(f"With file pattern: '{args.pattern}'")
return args
def get_default_pattern(loader):
signature = inspect.signature(loader.discover)
params = {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
return params["pattern"]
if __name__ == "__main__":
loader = unittest.TestLoader()
default_pattern = get_default_pattern(loader)
# Parse input arguments
args = parse_args(default_pattern)
# If quick is desired, set environment variable
if args.quick:
os.environ["QUICKTEST"] = "True"
# Get all test names (optionally from some path with some pattern)
with PerfContext() as pc:
tests = loader.discover(args.path, args.pattern)
discovery_time = pc.total_time
print(f"time to discover tests: {discovery_time}s")
test_runner = unittest.runner.TextTestRunner(
resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
)
# Use try catches to print the current results if encountering exception or keyboard interruption
try:
test_result = test_runner.run(tests)
print_results(results, discovery_time, args.thresh, "tests finished")
sys.exit(not test_result.wasSuccessful())
except KeyboardInterrupt:
print_results(results, discovery_time, args.thresh, "tests cancelled")
sys.exit(1)
except Exception:
print_results(results, discovery_time, args.thresh, "exception reached")
raise
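# Example invocations (illustrative, based on the arguments defined in parse_args above):
#   python tests/runner.py                  # discover under '.' with the default pattern
#   python tests/runner.py -s tests -t 20   # discover under 'tests', flag tests slower than 20s
#   python tests/runner.py -q -f            # quick tests only, stop on first failure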
| 35.22069
| 118
| 0.662033
| 650
| 5,107
| 5.118462
| 0.36
| 0.031259
| 0.030658
| 0.033664
| 0.116622
| 0.107003
| 0.06853
| 0.055906
| 0.027653
| 0
| 0
| 0.007513
| 0.218132
| 5,107
| 144
| 119
| 35.465278
| 0.825695
| 0.196985
| 0
| 0.088235
| 0
| 0
| 0.22782
| 0.012042
| 0
| 0
| 0
| 0
| 0.009804
| 1
| 0.058824
| false
| 0
| 0.068627
| 0
| 0.166667
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c25af4aec5e8d2b72efcbe5e7b1a661e7cc9946
| 963
|
py
|
Python
|
venv/Lib/site-packages/pandas/core/array_algos/transforms.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/core/array_algos/transforms.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/core/array_algos/transforms.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
"""
transforms.py is for shape-preserving functions.
"""
import numpy as np
def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray:
new_values = values
if periods == 0 or values.size == 0:
return new_values.copy()
# make sure array sent to np.roll is c_contiguous
f_ordered = values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if new_values.size:
new_values = np.roll(
new_values,
np.intp(periods),
axis=axis,
)
axis_indexer = [slice(None)] * values.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return new_values
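# Illustrative example (not part of pandas): shifting a 1-D array by two positions
# fills the vacated leading slots with the fill value:
#   shift(np.arange(5.0), periods=2, axis=0, fill_value=np.nan)
#   -> array([nan, nan, 0., 1., 2.])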
| 24.692308
| 82
| 0.599169
| 126
| 963
| 4.396825
| 0.396825
| 0.194946
| 0.036101
| 0.046931
| 0.104693
| 0.104693
| 0.104693
| 0.104693
| 0
| 0
| 0
| 0.006015
| 0.30945
| 963
| 38
| 83
| 25.342105
| 0.827068
| 0.124611
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c26b67f1983ed6d013acb44413f671a2be21260
| 7,534
|
py
|
Python
|
splunk_sdk/action/v1beta2/gen_action_service_api.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2019-08-01T06:16:17.000Z
|
2021-04-16T20:00:02.000Z
|
splunk_sdk/action/v1beta2/gen_action_service_api.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2020-09-27T12:03:24.000Z
|
2021-08-06T18:01:32.000Z
|
splunk_sdk/action/v1beta2/gen_action_service_api.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2019-08-20T17:49:27.000Z
|
2022-03-27T16:39:10.000Z
|
# coding: utf-8
# Copyright © 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Action Service
With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions.
OpenAPI spec version: v1beta2.12 (recommended default)
Generated by: https://openapi-generator.tech
"""
from requests import Response
from string import Template
from typing import List, Dict
from splunk_sdk.base_client import handle_response
from splunk_sdk.base_service import BaseService
from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel
from splunk_sdk.action.v1beta2.gen_models import Action
from splunk_sdk.action.v1beta2.gen_models import ActionMutable
from splunk_sdk.action.v1beta2.gen_models import ActionResult
from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail
from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey
from splunk_sdk.action.v1beta2.gen_models import ServiceError
from splunk_sdk.action.v1beta2.gen_models import TriggerEvent
class ActionService(BaseService):
"""
Action Service
Version: v1beta2.12
With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions.
"""
def __init__(self, base_client):
super().__init__(base_client)
def create_action(self, action: Action, query_params: Dict[str, object] = None) -> Action:
"""
Creates an action template.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/action/v1beta2/actions").substitute(path_params)
url = self.base_client.build_url(path)
data = action.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, Action)
def delete_action(self, action_name: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
"""
Removes an action template.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.delete(url, params=query_params)
return handle_response(response, )
def get_action(self, action_name: str, query_params: Dict[str, object] = None) -> Action:
"""
Returns a specific action template.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, Action)
def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> ActionResult:
"""
Returns the status of an action that was invoked. The status is available for 4 days after the last status change.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
"status_id": status_id,
}
path = Template("/action/v1beta2/actions/${action_name}/status/${status_id}").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, ActionResult)
def get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]:
"""
Returns the status details of the invoked email action. The status is available for 4 days after the last status change.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
"status_id": status_id,
}
path = Template("/action/v1beta2/actions/${action_name}/status/${status_id}/details").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, ActionResultEmailDetail)
def get_public_webhook_keys(self, query_params: Dict[str, object] = None) -> List[PublicWebhookKey]:
"""
Returns an array of one or two webhook keys. The first key is active. The second key, if present, is expired.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/system/action/v1beta2/webhook/keys").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, PublicWebhookKey)
def list_actions(self, query_params: Dict[str, object] = None) -> List[Action]:
"""
Returns the list of action templates.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/action/v1beta2/actions").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, Action)
def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object] = None) -> SSCVoidModel:
"""
Invokes an action.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
data = trigger_event.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, )
def update_action(self, action_name: str, action_mutable: ActionMutable, query_params: Dict[str, object] = None) -> Action:
"""
Modifies an action template.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
data = action_mutable.to_dict()
response = self.base_client.patch(url, json=data, params=query_params)
return handle_response(response, Action)
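# Illustrative usage sketch (an assumption, not part of the generated client);
# `base_client` stands in for a configured splunk_sdk BaseClient:
#   service = ActionService(base_client)
#   for action in service.list_actions():
#       print(action.to_dict())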
| 37.1133
| 177
| 0.668835
| 930
| 7,534
| 5.233333
| 0.195699
| 0.081364
| 0.054654
| 0.033285
| 0.671872
| 0.66242
| 0.656667
| 0.629957
| 0.5451
| 0.544483
| 0
| 0.00885
| 0.235068
| 7,534
| 202
| 178
| 37.29703
| 0.835502
| 0.218211
| 0
| 0.583333
| 0
| 0
| 0.078834
| 0.063818
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0
| 0.12037
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c2a0b02facefb7ade979ad8ea41989718dd6e87
| 13,974
|
py
|
Python
|
frog/views/gallery.py
|
dreamhaven/Frog
|
66e50610d5059aa371e0a50b65ceddd4813b2bc1
|
[
"MIT"
] | 3
|
2021-10-03T23:11:24.000Z
|
2021-10-04T12:14:56.000Z
|
frog/views/gallery.py
|
dreamhaven/Frog
|
66e50610d5059aa371e0a50b65ceddd4813b2bc1
|
[
"MIT"
] | 7
|
2019-10-15T20:51:36.000Z
|
2020-02-27T18:25:26.000Z
|
frog/views/gallery.py
|
dreamhaven/Frog
|
66e50610d5059aa371e0a50b65ceddd4813b2bc1
|
[
"MIT"
] | 1
|
2020-09-30T11:23:55.000Z
|
2020-09-30T11:23:55.000Z
|
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
"""
Gallery API
::
GET / Lists the galleries currently visible by the current user
POST / Creates a gallery object
GET /id Gallery object if visible by the current user
PUT /id Adds image or video objects to the gallery
DELETE /id Removes image or video objects from the gallery
GET /filter Returns a filtered list of image and video objects
"""
import time
import functools
import logging
import requests
from django.core.mail import mail_managers
from django.http import JsonResponse
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models import Q, Count
from django.db import connection
from django.db.utils import ProgrammingError
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.conf import settings
import six
import json
try:
from haystack.query import SearchQuerySet
HAYSTACK = True
except (ImportError, ImproperlyConfigured):
HAYSTACK = False
from frog.models import (
Gallery,
Image,
Video,
Group,
GallerySubscription,
SiteConfig,
Piece,
)
from frog.common import Result, getObjectsFromGuids, getClientIP
LOGGER = logging.getLogger("frog")
try:
QUERY_MODELS = [
_
for _ in ContentType.objects.filter(app_label="frog")
if issubclass(_.model_class(), Piece)
]
except ProgrammingError:
pass
BATCH_LENGTH = 75
def index(request, obj_id=None):
"""Handles a request based on method and calls the appropriate function"""
if request.method == "GET":
return get(request, obj_id)
elif request.method == "POST":
return post(request)
elif request.method == "PUT":
return put(request, obj_id)
elif request.method == "DELETE":
return delete(request, obj_id)
def get(request, obj_id=None):
if obj_id:
obj = Gallery.objects.get(pk=obj_id)
if obj.security != Gallery.PUBLIC and request.user.is_anonymous:
raise PermissionDenied
else:
res = Result()
personal = []
clearance = Gallery.PUBLIC
if request.user.is_authenticated:
personal = Gallery.objects.filter(
security=Gallery.PERSONAL, owner=request.user
)
try:
clearance = request.user.frog_prefs.first().clearance
except AttributeError:
clearance = Gallery.PUBLIC
# Staff members should see everything
if request.user.is_staff:
clearance = Gallery.GUARDED
objects = Gallery.objects.filter(security__lte=clearance)
ids = []
for gallery in objects:
if gallery.security == Gallery.PERSONAL:
continue
if gallery.id in ids:
continue
ids.append(gallery.id)
res.append(gallery.json())
for gallery in personal:
res.append(gallery.json())
return JsonResponse(res.asDict())
@login_required
def post(request):
""" Create a Gallery """
defaultname = "New Gallery %i" % Gallery.objects.all().count()
data = json.loads(request.body)["body"]
title = data.get("title", defaultname)
description = data.get("description", "")
security = int(
data.get("security", request.user.frog_prefs.first().clearance)
)
g, created = Gallery.objects.get_or_create(title=title)
g.security = security
g.description = description
g.owner = request.user
g.save()
res = Result()
res.append(g.json())
res.message = "Gallery created" if created else ""
return JsonResponse(res.asDict())
@login_required
def put(request, obj_id=None):
""" Adds Image and Video objects to Gallery based on GUIDs """
data = json.loads(request.body)["body"]
guids = data.get("guids", "").split(",")
move = data.get("from")
security = data.get("security")
gallery = Gallery.objects.get(pk=obj_id)
# Set the security first so subsequent securityChecks will get the correct security level
if security is not None:
gallery.security = json.loads(security)
gallery.save()
for child in gallery.gallery_set.all():
child.security = gallery.security
child.save()
if guids:
items = getObjectsFromGuids(guids)
gallery.addItems(items)
if move:
fromgallery = Gallery.objects.get(pk=move)
fromgallery.removeItems(items)
res = Result()
res.append(gallery.json())
return JsonResponse(res.asDict())
@login_required
def delete(request, obj_id=None):
""" Removes ImageVideo objects from Gallery """
data = json.loads(request.body)
guids = data.get("guids").split(",")
items = getObjectsFromGuids(guids)
gallery = Gallery.objects.get(pk=obj_id)
LOGGER.info(
"{} removed {} from {}".format(request.user.email, guids, gallery)
)
gallery.removeItems(items)
res = Result()
return JsonResponse(res.asDict())
@login_required
def filterObjects(request, obj_id):
"""
Filters Gallery for the requested ImageVideo objects. Returns a Result object with
serialized objects
"""
if int(obj_id) == 0:
obj = None
else:
obj = Gallery.objects.get(pk=obj_id)
isanonymous = request.user.is_anonymous
if isanonymous and obj is None:
LOGGER.warning(
"There was an anonymous access attempt from {} to {}".format(
getClientIP(request), obj
)
)
raise PermissionDenied()
if isanonymous and obj and obj.security != Gallery.PUBLIC:
LOGGER.warning(
"There was an anonymous access attempt from {} to {}".format(
getClientIP(request), obj
)
)
raise PermissionDenied()
if obj and obj.security != Gallery.PERSONAL:
if request.user.frog_prefs.first().clearance < obj.security:
raise PermissionDenied()
tags = json.loads(request.GET.get("filters", "[[]]"))
more = json.loads(request.GET.get("more", "false"))
orderby = request.GET.get(
"orderby", request.user.frog_prefs.get().json()["orderby"]
)
tags = [t for t in tags if t]
return _filter(request, obj, tags=tags, more=more, orderby=orderby)
def _filter(request, object_, tags=None, more=False, orderby="created"):
"""Filters Piece objects from self based on filters, search, and range
:param tags: List of tag IDs to filter
:type tags: list
:param more -- bool, Returns more of the same filtered set of images based on session range
return list, Objects filtered
"""
res = Result()
idDict = {}
objDict = {}
data = {}
modelmap = {}
# Get all IDs for each model
for m in QUERY_MODELS:
modelmap[m.model_class()] = m.model
if object_:
idDict[m.model] = m.model_class().objects.filter(gallery=object_)
else:
idDict[m.model] = m.model_class().objects.all()
if idDict[m.model] is None:
continue
if tags:
for bucket in tags:
searchQuery = ""
o = None
for item in bucket:
if item == 0:
# filter by tagless
idDict[m.model].annotate(num_tags=Count("tags"))
if not o:
o = Q()
o |= Q(num_tags__lte=1)
break
elif isinstance(item, six.integer_types):
# filter by tag
if not o:
o = Q()
o |= Q(tags__id=item)
else:
# add to search string
searchQuery += item + " "
if not HAYSTACK:
if not o:
o = Q()
# use a basic search
o |= Q(title__icontains=item)
if HAYSTACK and searchQuery != "":
# once all tags have been filtered, filter by search
searchIDs = search(searchQuery, m.model_class())
if searchIDs:
if not o:
o = Q()
o |= Q(id__in=searchIDs)
if o:
# apply the filters
idDict[m.model] = (
idDict[m.model]
.annotate(num_tags=Count("tags"))
.filter(o)
)
else:
idDict[m.model] = idDict[m.model].none()
# Remove hidden items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(hidden=True)
# Remove deleted items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(deleted=True)
# Get all ids of filtered objects, this will be a very fast query
idDict[m.model] = list(
idDict[m.model]
.order_by("-{}".format(orderby))
.values_list("id", flat=True)
)
lastid = request.session.get("last_{}".format(m.model), 0)
if not idDict[m.model]:
continue
if not more:
lastid = idDict[m.model][0]
try:
index = idDict[m.model].index(lastid)
except ValueError:
index = 0
if more and lastid != 0:
index += 1
idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH]
# perform the main query to retrieve the objects we want
objDict[m.model] = m.model_class().objects.filter(
id__in=idDict[m.model]
)
objDict[m.model] = (
objDict[m.model]
.select_related("author")
.prefetch_related("tags")
.order_by("-{}".format(orderby))
)
objDict[m.model] = list(objDict[m.model])
# combine and sort all objects by date
objects = _sortObjects(orderby, **objDict)
objects = objects[:BATCH_LENGTH]
# Find out last ids
lastids = {}
for obj in objects:
lastids["last_{}".format(modelmap[obj.__class__])] = obj.id
for key, value in lastids.items():
request.session[key] = value
# serialize objects
for i in objects:
res.append(i.json())
data["count"] = len(objects)
if settings.DEBUG:
data["queries"] = connection.queries
res.value = data
return JsonResponse(res.asDict())
def _sortObjects(orderby="created", **kwargs):
"""Sorts lists of objects and combines them into a single list"""
o = []
for m in kwargs.values():
for l in iter(m):
o.append(l)
o = list(set(o))
sortfunc = _sortByCreated if orderby == "created" else _sortByModified
if six.PY2:
o.sort(sortfunc)
else:
o.sort(key=functools.cmp_to_key(sortfunc))
return o
def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0
def _sortByModified(a, b):
"""Sort function for object by modified date"""
if a.modified < b.modified:
return 1
elif a.modified > b.modified:
return -1
else:
return 0
def search(query, model):
""" Performs a search query and returns the object ids """
query = query.strip()
LOGGER.debug(query)
sqs = SearchQuerySet()
results = sqs.raw_search("{}*".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}*".format(query)).models(model)
return [o.pk for o in results]
@require_POST
@login_required
def subscribe(request, obj_id):
gallery = Gallery.objects.get(pk=obj_id)
data = json.loads(request.body)["body"]
frequency = data.get("frequency", GallerySubscription.WEEKLY)
sub, created = GallerySubscription.objects.get_or_create(
gallery=gallery, user=request.user, frequency=frequency
)
if not created:
# it already existed so delete it
sub.delete()
return JsonResponse(Result().asDict())
| 30.312364
| 98
| 0.592672
| 1,640
| 13,974
| 4.990244
| 0.22622
| 0.02346
| 0.029326
| 0.01393
| 0.21652
| 0.192326
| 0.134164
| 0.084189
| 0.075391
| 0.075391
| 0
| 0.002138
| 0.297123
| 13,974
| 460
| 99
| 30.378261
| 0.831093
| 0.203163
| 0
| 0.245902
| 0
| 0
| 0.033586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039344
| false
| 0.003279
| 0.068852
| 0
| 0.170492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c2be9c37717776782c0be6604333fcf9bf8eb67
| 2,232
|
py
|
Python
|
pirates/speedchat/PSpeedChatQuestMenu.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/speedchat/PSpeedChatQuestMenu.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/speedchat/PSpeedChatQuestMenu.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.speedchat.PSpeedChatQuestMenu
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCTerminal import *
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from pirates.quest.Quest import Quest
from pirates.speedchat.PSpeedChatQuestTerminal import *
from pirates.pirate.LocalPirate import *
from pirates.quest.QuestStatus import *
from pirates.quest.QuestDNA import *
class PSpeedChatQuestMenu(SCMenu):
__module__ = __name__
def __init__(self):
SCMenu.__init__(self)
self.accept('localAvatarQuestAdded', self.__questMenuRefresh)
self.accept('localAvatarQuestUpdate', self.__questMenuRefresh)
self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh)
self.accept('localAvatarQuestComplete', self.__questMenuRefresh)
self.accept('localAvatarQuestDeleted', self.__questMenuRefresh)
def destroy(self):
SCMenu.destroy(self)
def __questMenuRefresh(self, quest, item=None, note=None):
self.clearMenu()
quests = localAvatar.questStatus.getCurrentQuests()
if quests is None:
return
for quest in quests:
q = quest
if q is None:
continue
if not q.isComplete():
self.__questAddSCChat(q)
return
def __questAddSCChat(self, quest):
qId = quest.questId
qDNA = QuestDB.QuestDict.get(qId)
if not qDNA:
return
qInt = qDNA.questInt
i = 0
for task in quest.questDNA.getTasks():
if len(quest.getSCSummaryText(0)) > 2:
self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i))
if len(quest.getSCWhereIsText(0)) > 2:
self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i))
if len(quest.getSCHowToText(0)) > 2:
self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i))
i = i + 1
| 39.857143
| 106
| 0.670699
| 242
| 2,232
| 6.053719
| 0.380165
| 0.037543
| 0.065529
| 0.081911
| 0.081911
| 0.081911
| 0
| 0
| 0
| 0
| 0
| 0.032806
| 0.235215
| 2,232
| 56
| 107
| 39.857143
| 0.825425
| 0.09543
| 0
| 0.065217
| 0
| 0
| 0.057568
| 0.057568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.173913
| 0
| 0.369565
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c2cd54fa6ab0d6c947d651db03fbbb610a1bf1d
| 5,309
|
py
|
Python
|
spotifyembed/spotifyembed.py
|
R3XET/coffee-cogs
|
e7658213449ec140edaaf322514eaafb575f99bd
|
[
"MIT"
] | null | null | null |
spotifyembed/spotifyembed.py
|
R3XET/coffee-cogs
|
e7658213449ec140edaaf322514eaafb575f99bd
|
[
"MIT"
] | null | null | null |
spotifyembed/spotifyembed.py
|
R3XET/coffee-cogs
|
e7658213449ec140edaaf322514eaafb575f99bd
|
[
"MIT"
] | null | null | null |
# from redbot.core import Config
from redbot.core import Config, commands, checks
import asyncio
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
import re
class Spotifyembed(commands.Cog):
"""Automatically send a reply to Spotify links with a link to the embed preview. Convenient for mobile users who can finally listen to music samples from Discord, without needing an account."""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=806715409318936616)
default_guild = {
"spotifyembedEnabled": False,
}
self.config.register_guild(**default_guild)
@commands.group(aliases=["setspembed", "setspe"])
@checks.guildowner_or_permissions()
async def setspotifyembed(self, ctx: commands.Context):
"""Set Spotify Embed settings"""
if not ctx.invoked_subcommand:
# Guild settings
e = discord.Embed(color=(await ctx.embed_colour()), title="Guild Settings", description="")
e.add_field(name="spotifyembedEnabled", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False)
await ctx.send(embed=e)
@setspotifyembed.command(name="enable")
async def setspembedenable(self, ctx):
"""Enable auto-responding to Spotify links"""
await self.config.guild(ctx.guild).spotifyembedEnabled.set(True)
await ctx.message.add_reaction("✅")
@setspotifyembed.command(name="disable")
async def setspembeddisable(self, ctx):
"""Disable auto-responding to Spotify links"""
await self.config.guild(ctx.guild).spotifyembedEnabled.set(False)
await ctx.message.add_reaction("✅")
@commands.command(aliases=["spembed", "spe"])
async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False):
"""Return a Spotify embed link
Can set asMyself to true/false, for sending as webhook"""
spembedSplit = spotifyLink.split('.com/')
sendMsg = spembedSplit[0] + ".com/embed/" + spembedSplit[1]
if asMyself == False:
return await ctx.send(sendMsg)
elif asMyself == True:
# Find a webhook that the bot made
try:
whooklist = await ctx.channel.webhooks()
whurl = ""
# Return if match
for wh in whooklist:
if self.bot.user == wh.user:
whurl = wh.url
# Make new webhook if one didn't exist
if whurl == "":
newHook = await ctx.channel.create_webhook(name="Webhook")
whurl = newHook.url
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))
await webhook.send(
sendMsg,
username=ctx.author.display_name,
avatar_url=ctx.author.avatar_url,
)
except discord.errors.Forbidden:
return await ctx.send(sendMsg)
else:
return await ctx.send("An error occurred.")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
return
if message.webhook_id:
return
if message.guild is None:
return
spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled()
if spotifyembedEnabled is not True:
return
# Ignore if we find [p]spotifyembed in the trigger message
spembedCommandIgnore = r"^\S{1,9}(spotifyembed|spembed|spe)(?=\s|$)"
spembedCommands = re.findall(spembedCommandIgnore, message.clean_content)
if len(spembedCommands) > 0:
return
# Ignore if we find no spotify links in the trigger message
spembedFinder = r"https\:\/\/open\.spotify\.com\/\w{4,12}\/\w{14,26}(?=\?|$|\s)"
spembedMatches = re.findall(spembedFinder, message.clean_content)
if len(spembedMatches) <= 0:
return
sendMsg = ""
for match in spembedMatches:
spembedSplit = match.split('.com/')
sendMsg += spembedSplit[0] + ".com/embed/" + spembedSplit[1] + "\n"
# Find a webhook that the bot made
try:
whooklist = await message.channel.webhooks()
whurl = ""
# Return if match
for wh in whooklist:
if self.bot.user == wh.user:
whurl = wh.url
# Make new webhook if one didn't exist
if whurl == "":
newHook = await message.channel.create_webhook(name="Webhook")
whurl = newHook.url
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))
await webhook.send(
sendMsg,
username=message.author.display_name,
avatar_url=message.author.avatar_url,
)
except discord.errors.Forbidden:
return await message.channel.send(sendMsg)
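# Illustrative link conversion performed above (placeholder <id>, not a real track):
#   https://open.spotify.com/track/<id>  ->  https://open.spotify.com/embed/track/<id>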
| 41.155039
| 197
| 0.595592
| 567
| 5,309
| 5.529101
| 0.303351
| 0.022967
| 0.019139
| 0.025518
| 0.440191
| 0.366507
| 0.349282
| 0.33429
| 0.33429
| 0.268581
| 0
| 0.00896
| 0.306272
| 5,309
| 128
| 198
| 41.476563
| 0.841705
| 0.098135
| 0
| 0.350515
| 0
| 0.010309
| 0.057646
| 0.022662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.061856
| 0
| 0.185567
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c2d9f91e47f374b558a37fc891829c105809bba
| 4,714
|
py
|
Python
|
rlcard/utils/seeding.py
|
AdrianP-/rlcard
|
5b99dc8faa4c97ecac2d1189967b90c45d79624b
|
[
"MIT"
] | null | null | null |
rlcard/utils/seeding.py
|
AdrianP-/rlcard
|
5b99dc8faa4c97ecac2d1189967b90c45d79624b
|
[
"MIT"
] | null | null | null |
rlcard/utils/seeding.py
|
AdrianP-/rlcard
|
5b99dc8faa4c97ecac2d1189967b90c45d79624b
|
[
"MIT"
] | null | null | null |
#The MIT License
#
#Copyright (c) 2020 DATA Lab at Texas A&M University
#Copyright (c) 2016 OpenAI (https://openai.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import hashlib
import numpy as np
import os
import struct
color2num = dict(gray=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37, crimson=38)
def colorize(string, color, bold=False, highlight=False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
attrs = ';'.join(attr)
return '\x1b[%sm%s\x1b[0m' % (attrs, string)
def error(msg, *args):
print(colorize('%s: %s'%('ERROR', msg % args), 'red'))
def np_random(seed=None):
if seed is not None and not (isinstance(seed, int) and 0 <= seed):
        # `error` above is a plain logging function, not a module with an Error class,
        # so raise a built-in exception instead of the original `error.Error`.
        raise ValueError('Seed must be a non-negative integer or omitted, not {}'.format(seed))
seed = create_seed(seed)
rng = np.random.RandomState()
rng.seed(_int_list_from_bigint(hash_seed(seed)))
return rng, seed
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
linear correlations between seeds of multiple PRNG's can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode('utf8')).digest()
return _bigint_from_bytes(hash[:max_bytes])
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode('utf8')
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2**(8 * max_bytes)
else:
        raise ValueError('Invalid type for seed: {} ({})'.format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = sizeof_int - len(bytes) % sizeof_int
bytes += b'\0' * padding
int_count = int(len(bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
        raise ValueError('Seed must be non-negative, not {}'.format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints
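# Illustrative usage sketch (not part of the module):
#   rng, seed = np_random(42)
#   sample = rng.uniform()   # reproducible across runs given the same seed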
| 41.350877
| 461
| 0.694103
| 708
| 4,714
| 4.559322
| 0.427966
| 0.024783
| 0.018587
| 0.011772
| 0.099133
| 0.072491
| 0.057001
| 0.057001
| 0.057001
| 0.057001
| 0
| 0.01791
| 0.206406
| 4,714
| 113
| 462
| 41.716814
| 0.844961
| 0.535214
| 0
| 0
| 0
| 0
| 0.07882
| 0
| 0
| 0
| 0
| 0.00885
| 0
| 1
| 0.12069
| false
| 0
| 0.068966
| 0
| 0.310345
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c2f421ab198ddb3faa7c72a6c2f2f1822a0634f
| 8,573
|
py
|
Python
|
ops/transforms.py
|
ex4sperans/freesound-classification
|
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
|
[
"Apache-2.0"
] | 55
|
2019-06-30T02:36:10.000Z
|
2021-12-07T07:24:42.000Z
|
ops/transforms.py
|
ex4sperans/freesound-classification
|
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
|
[
"Apache-2.0"
] | 13
|
2020-01-28T22:48:34.000Z
|
2022-03-11T23:50:36.000Z
|
ops/transforms.py
|
ex4sperans/freesound-classification
|
71b9920ce0ae376aa7f1a3a2943f0f92f4820813
|
[
"Apache-2.0"
] | 7
|
2019-07-21T15:54:16.000Z
|
2020-07-22T13:02:37.000Z
|
import random
import math
from functools import partial
import json
import pysndfx
import librosa
import numpy as np
import torch
from ops.audio import (
read_audio, compute_stft, trim_audio, mix_audio_and_labels,
shuffle_audio, cutout
)
SAMPLE_RATE = 44100
class Augmentation:
"""A base class for data augmentation transforms"""
pass
class MapLabels:
    def __init__(self, class_map, drop_raw=True):
        self.class_map = class_map
        self.drop_raw = drop_raw
    def __call__(self, dataset, **inputs):
        labels = np.zeros(len(self.class_map), dtype=np.float32)
        for c in inputs["raw_labels"]:
            labels[self.class_map[c]] = 1.0
        transformed = dict(inputs)
        transformed["labels"] = labels
        if self.drop_raw:
            transformed.pop("raw_labels")
        return transformed
class MixUp(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
first_audio, first_labels = inputs["audio"], inputs["labels"]
random_sample = dataset.random_clean_sample()
new_audio, new_labels = mix_audio_and_labels(
first_audio, random_sample["audio"],
first_labels, random_sample["labels"]
)
transformed["audio"] = new_audio
transformed["labels"] = new_labels
return transformed
class FlipAudio(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = np.flipud(inputs["audio"])
return transformed
class AudioAugmentation(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
effects_chain = (
pysndfx.AudioEffectsChain()
.reverb(
reverberance=random.randrange(50),
room_scale=random.randrange(50),
stereo_depth=random.randrange(50)
)
.pitch(shift=random.randrange(-300, 300))
.overdrive(gain=random.randrange(2, 10))
.speed(random.uniform(0.9, 1.1))
)
transformed["audio"] = effects_chain(inputs["audio"])
return transformed
class LoadAudio:
def __init__(self):
pass
def __call__(self, dataset, **inputs):
audio, sr = read_audio(inputs["filename"])
transformed = dict(inputs)
transformed["audio"] = audio
transformed["sr"] = sr
return transformed
class STFT:
eps = 1e-4
def __init__(self, n_fft, hop_size):
self.n_fft = n_fft
self.hop_size = hop_size
def __call__(self, dataset, **inputs):
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps)
transformed = dict(inputs)
transformed["stft"] = np.transpose(stft)
return transformed
class AudioFeatures:
eps = 1e-4
def __init__(self, descriptor, verbose=True):
name, *args = descriptor.split("_")
self.feature_type = name
if name == "stft":
n_fft, hop_size = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_features = self.n_fft // 2 + 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing STFT features with params:\n",
"n_fft: {}, hop_size: {}".format(
n_fft, hop_size
)
)
elif name == "mel":
n_fft, hop_size, n_mel = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_mel = int(n_mel)
self.n_features = self.n_mel
self.padding_value = 0.0
if verbose:
print(
"\nUsing mel features with params:\n",
"n_fft: {}, hop_size: {}, n_mel: {}".format(
n_fft, hop_size, n_mel
)
)
elif name == "raw":
self.n_features = 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing raw waveform features."
)
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if self.feature_type == "stft":
# stft = compute_stft(
# inputs["audio"],
# window_size=self.n_fft, hop_size=self.hop_size,
# eps=self.eps, log=True
# )
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "mel":
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps, log=False
)
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "raw":
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
return transformed
class SampleSegment(Augmentation):
def __init__(self, ratio=(0.3, 0.9), p=1.0):
self.min, self.max = ratio
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
original_size = inputs["audio"].size
target_size = int(np.random.uniform(self.min, self.max) * original_size)
start = np.random.randint(original_size - target_size - 1)
transformed["audio"] = inputs["audio"][start:start+target_size]
return transformed
class ShuffleAudio(Augmentation):
def __init__(self, chunk_length=0.5, p=0.5):
self.chunk_length = chunk_length
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = shuffle_audio(
transformed["audio"], self.chunk_length, sr=transformed["sr"])
return transformed
class CutOut(Augmentation):
def __init__(self, area=0.25, p=0.5):
self.area = area
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = cutout(
transformed["audio"], self.area)
return transformed
class SampleLongAudio:
def __init__(self, max_length):
self.max_length = max_length
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if (inputs["audio"].size / inputs["sr"]) > self.max_length:
max_length = self.max_length * inputs["sr"]
start = np.random.randint(0, inputs["audio"].size - max_length)
transformed["audio"] = inputs["audio"][start:start+max_length]
return transformed
class OneOf:
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, dataset, **inputs):
        transform = random.choice(self.transforms)
        # pass `dataset` through: every transform expects it as the first argument
        return transform(dataset=dataset, **inputs)
class DropFields:
    def __init__(self, fields):
        self.to_drop = fields
    def __call__(self, dataset, **inputs):
        transformed = dict()
        for name, value in inputs.items():
            if name not in self.to_drop:
                transformed[name] = value
        return transformed
class RenameFields:
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
for old, new in self.mapping.items():
transformed[new] = transformed.pop(old)
return transformed
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def switch_off_augmentations(self):
for t in self.transforms:
if isinstance(t, Augmentation):
t.p = 0.0
def __call__(self, dataset=None, **inputs):
for t in self.transforms:
inputs = t(dataset=dataset, **inputs)
return inputs
class Identity:
def __call__(self, dataset=None, **inputs):
return inputs
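# A short, hypothetical wiring of the pieces above; the class map and the
# filename are placeholders, not part of the original project.
pipeline = Compose([
    LoadAudio(),
    MapLabels(class_map={"dog_bark": 0, "siren": 1}),
    MixUp(p=0.5),
    AudioFeatures("mel_2048_512_64"),
    DropFields(("audio", "filename", "sr")),
])
pipeline.switch_off_augmentations()  # zero out augmentation probabilities, e.g. for validation
sample = pipeline(dataset=None, filename="clip.wav", raw_labels=["siren"])
# sample now holds only the "signal" and "labels" entries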
| 22.679894
| 84
| 0.561647
| 956
| 8,573
| 4.779289
| 0.16841
| 0.029109
| 0.03852
| 0.063033
| 0.443861
| 0.370759
| 0.324141
| 0.315605
| 0.26264
| 0.233749
| 0
| 0.010941
| 0.328356
| 8,573
| 378
| 85
| 22.679894
| 0.782563
| 0.019713
| 0
| 0.405405
| 0
| 0
| 0.045384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144144
| false
| 0.009009
| 0.040541
| 0.004505
| 0.342342
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c30506aa8598c0388ff7d67c1b22762e60080e5
| 2,011
|
py
|
Python
|
figures/pp.py
|
mathematicalmichael/thesis
|
2906b10f94960c3e75bdb48e5b8b583f59b9441e
|
[
"MIT"
] | 6
|
2019-04-24T08:05:49.000Z
|
2020-12-28T20:34:29.000Z
|
figures/pp.py
|
mathematicalmichael/thesis
|
2906b10f94960c3e75bdb48e5b8b583f59b9441e
|
[
"MIT"
] | 59
|
2019-12-27T23:15:05.000Z
|
2021-11-24T17:52:57.000Z
|
figures/pp.py
|
mathematicalmichael/thesis
|
2906b10f94960c3e75bdb48e5b8b583f59b9441e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
# os.environ['OMP_NUM_THREADS'] = '1'
from newpoisson import poisson
import numpy as np
from fenics import set_log_level, File, RectangleMesh, Point
mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)
# comm = mesh.mpi_comm()
set_log_level(40) # ERROR=40
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description="Poisson Problem")
    parser.add_argument('-n', '--num', default=10, type=int,
help="Number of samples")
parser.add_argument('-o', '--outfile', default='results',
help="Output filename (no extension)")
parser.add_argument('-i', '--input-dim', default=1, type=int)
parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)')
args = parser.parse_args()
num_samples = args.num
dist = args.dist
outfile = args.outfile.replace('.pkl','')
inputdim = args.input_dim
    if inputdim == 1:  # U[1, 5]
        randsamples = 1 + 4*np.random.rand(num_samples)
    else:
        if dist == 'n':  # N(0, 1)
            randsamples = np.random.randn(num_samples, inputdim)
        elif dist == 'u':  # U[-4, 0]
            randsamples = -4*np.random.rand(num_samples, inputdim)
else:
raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)")
sample_seed_list = list(zip(range(num_samples), randsamples))
def wrapper(sample, outfile):
g=sample[1]
u = poisson(gamma=g, mesh=mesh)
# Save solution
fname = f"{outfile}-data/poisson-{int(sample[0]):06d}.xml"
File(fname, 'w') << u
return {int(sample[0]): {'u': fname, 'gamma': sample[1]}}
results = []
for sample in sample_seed_list:
r = wrapper(sample, outfile)
results.append(r)
# print(results)
import pickle
pickle.dump(results, open(f'{outfile}.pkl','wb'))
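# Example invocation (a sketch; requires FEniCS plus the local `newpoisson`
# module, and assumes the results-data/ directory is writable):
#   python pp.py --num 20 --outfile results --input-dim 1
# Each sample's solution is written to results-data/poisson-XXXXXX.xml and a
# list of {index: {'u': path, 'gamma': value}} records is pickled to results.pkl.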
| 32.967213
| 111
| 0.61462
| 264
| 2,011
| 4.560606
| 0.435606
| 0.041528
| 0.056478
| 0.024917
| 0.038206
| 0.038206
| 0
| 0
| 0
| 0
| 0
| 0.019987
| 0.228742
| 2,011
| 60
| 112
| 33.516667
| 0.756286
| 0.103431
| 0
| 0.047619
| 0
| 0
| 0.169085
| 0.026228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.142857
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c308137f6fcaffcc096aaa674f08780ed2a8ef7
| 3,606
|
py
|
Python
|
additions/irreducible_check.py
|
kluhan/seraphim
|
412b693effb15f80d348d6d885d7c781774bb8aa
|
[
"MIT"
] | null | null | null |
additions/irreducible_check.py
|
kluhan/seraphim
|
412b693effb15f80d348d6d885d7c781774bb8aa
|
[
"MIT"
] | null | null | null |
additions/irreducible_check.py
|
kluhan/seraphim
|
412b693effb15f80d348d6d885d7c781774bb8aa
|
[
"MIT"
] | null | null | null |
"""
Irreduzibilitätskriterien
Implementiert wurden das Eisenstein- und das Perronkriterium
Quellen:
https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
Übergeben werden Polynome vom Typ Polynomial, keine direkten Listen von Koeffizienten
"""
import logging
import helper
import itertools
def factor(n):
    # Factorisation of a number n
    factors = []
    for i in range(1, n + 1):
        if n % i == 0:
            factors.append(i)
    return factors
def prime_factor(n):
    # Prime factorisation of a number n
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
# Recursive implementation of HCF
def hcf(x, y):
"""Highest common factor"""
if y == 0:
return x
else:
return hcf(y, x % y)
def is_polynomial_coprime(polynomial):
"""Überprüft, ob ein Polynom teilerfremd (coprime) ist"""
non_zero_polynomial = [
i for i in polynomial.coefficients if i != 0
] # Nullen würden Ergebnis von HCF verfälschen
if polynomial.degree() == 0:
return True
for x, y in itertools.combinations(non_zero_polynomial, 2):
if hcf(x, y) != 1:
return False
return True
# Source: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
def is_irreducible_perron(polynomial):
"""
Prüft ein Polynom auf Irreduzierbarkeit (Perron).
Führender Koeffizient != 1 funktioniert nicht.
Keine Aussage möglich, wenn vorletzer Koeffizient kleiner ist als die absolute Summe der restlichen Koeffizienten
"""
if polynomial.degree() < 0:
return logging.error("Polynom ungültig")
const_coefficient = polynomial.coefficients[0]
if const_coefficient == 0:
return 0
lead_coefficient = polynomial.coefficients[polynomial.degree()]
assert lead_coefficient == 1
nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])
total = 1
i = 0
for coeff in polynomial.coefficients:
if i < polynomial.degree() - 1:
total += abs(coeff)
i = i + 1
if nm1_coefficient > total:
return 1
return 2
# Sources: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf
# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
def is_irreducible_eisenstein(polynomial):
    """
    An implementation of the Eisenstein criterion.
    """
    # The polynomial must have a degree m >= 1
    if polynomial.degree() < 1:
        return 2
    # Eisenstein requires coprime coefficients
    if helper.is_polynomial_coprime(polynomial) is False:
        return 2
    # Look for a prime that divides all coefficients of the polynomial below
    # degree m, does not divide the leading coefficient, and whose square
    # does not divide a0
    const_coeff = polynomial.coefficients[0]
    if const_coeff == 0:
        return 0
    lead_coeff = polynomial.coefficients[polynomial.degree()]
    # The prime factorisation of the constant term yields the candidate primes
    prime_factors = helper.prime_factor(const_coeff)
    for p in prime_factors:
        if lead_coeff % p == 0:
            continue  # p must not divide the leading coefficient
        if const_coeff % pow(p, 2) == 0:
            continue  # if p^2 divides the constant coefficient, p allows no statement
        if all(c % p == 0 for c in polynomial.coefficients[0 : polynomial.degree()]):
            return 1  # p divides every coefficient below the leading one: irreducible
    return 2  # no suitable prime found, so no statement can be made
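# Hypothetical usage; `Polynomial` is assumed to store coefficients with
# index 0 as the constant term and to expose degree().
# f(x) = x^3 + 3x^2 + 6x + 3 is Eisenstein-irreducible with p = 3.
f = Polynomial([3, 6, 3, 1])
print(is_irreducible_eisenstein(f))  # 1 -> irreducible
print(is_irreducible_perron(f))      # 2 -> no statement possible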
| 27.112782
| 121
| 0.646977
| 457
| 3,606
| 5.041575
| 0.354486
| 0.06684
| 0.029514
| 0.013889
| 0.194878
| 0.106337
| 0.074219
| 0.074219
| 0.074219
| 0.074219
| 0
| 0.02719
| 0.265668
| 3,606
| 132
| 122
| 27.318182
| 0.8429
| 0.395729
| 0
| 0.287671
| 0
| 0
| 0.007667
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 1
| 0.082192
| false
| 0
| 0.041096
| 0
| 0.356164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c30bd2dd03a5aeb1d8422cd8b6cb2d539652200
| 39,763
|
py
|
Python
|
numba/stencils/stencil.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,620
|
2015-01-04T08:51:04.000Z
|
2022-03-31T12:52:18.000Z
|
numba/stencils/stencil.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 6,457
|
2015-01-04T03:18:41.000Z
|
2022-03-31T17:38:42.000Z
|
numba/stencils/stencil.py
|
auderson/numba
|
3d67c9850ab56457f418cf40af6245fd9c337705
|
[
"BSD-2-Clause"
] | 930
|
2015-01-25T02:33:03.000Z
|
2022-03-30T14:10:32.000Z
|
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.core.errors import NumbaValueError
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support
class StencilFuncLowerer(object):
'''Callable class responsible for lowering calls to a specific StencilFunc.
'''
def __init__(self, sf):
self.stencilFunc = sf
def __call__(self, context, builder, sig, args):
cres = self.stencilFunc.compile_for_argtys(sig.args, {},
sig.return_type, None)
res = context.call_internal(builder, cres.fndesc, sig, args)
context.add_linking_libs([cres.library])
return res
@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
ashape = a.shape
# We need literal_unroll here because the stencil might take
# multiple input arrays with different types that are not compatible
# (e.g. values as float[:] and flags as bool[:])
# When more than three total arrays are given, the second and third
# are iterated over in the loop below. Without literal_unroll, their
# types have to match.
# An example failing signature without literal_unroll might be
# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
for arg in literal_unroll(args):
        if a.ndim != arg.ndim:
            raise ValueError("Secondary stencil array does not have the same "
                             "number of dimensions as the first stencil input.")
        argshape = arg.shape
        for i in range(len(ashape)):
            if ashape[i] > argshape[i]:
                raise ValueError("Secondary stencil array has some dimension "
                                 "smaller than the same dimension in the "
                                 "first stencil input.")
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
class StencilFunc(object):
"""
A special type to hold stencil information for the IR.
"""
id_counter = 0
def __init__(self, kernel_ir, mode, options):
self.id = type(self).id_counter
type(self).id_counter += 1
self.kernel_ir = kernel_ir
self.mode = mode
self.options = options
self.kws = [] # remember original kws arguments
# stencils only supported for CPU context currently
self._typingctx = registry.cpu_target.typing_context
self._targetctx = registry.cpu_target.target_context
self._typingctx.refresh()
self._targetctx.refresh()
self._install_type(self._typingctx)
self.neighborhood = self.options.get("neighborhood")
self._type_cache = {}
self._lower_me = StencilFuncLowerer(self)
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
Returns the block labels that contained return statements.
"""
ret_blocks = []
for label, block in blocks.items():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
ret_blocks.append(label)
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
return ret_blocks
def add_indices_to_kernel(self, kernel, index_names, ndim,
neighborhood, standard_indexed, typemap, calltypes):
"""
Transforms the stencil kernel as specified by the user into one
that includes each dimension's index variable as part of the getitem
calls. So, in effect array[-1] becomes array[index0-1].
"""
const_dict = {}
kernel_consts = []
if config.DEBUG_ARRAY_OPT >= 1:
print("add_indices_to_kernel", ndim, neighborhood)
ir_utils.dump_blocks(kernel.blocks)
if neighborhood is None:
need_to_calc_kernel = True
else:
need_to_calc_kernel = False
if len(neighborhood) != ndim:
raise ValueError("%d dimensional neighborhood specified for %d " \
"dimensional input array" % (len(neighborhood), ndim))
tuple_table = ir_utils.get_tuple_table(kernel.blocks)
relatively_indexed = set()
for block in kernel.blocks.values():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Const)):
if config.DEBUG_ARRAY_OPT >= 1:
print("remembering in const_dict", stmt.target.name,
stmt.value.value)
# Remember consts for use later.
const_dict[stmt.target.name] = stmt.value.value
if ((isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['setitem', 'static_setitem']
and stmt.value.value.name in kernel.arg_names) or
(isinstance(stmt, ir.SetItem)
and stmt.target.name in kernel.arg_names)):
raise ValueError("Assignments to arrays passed to stencil " \
"kernels is not allowed.")
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['getitem', 'static_getitem']
and stmt.value.value.name in kernel.arg_names
and stmt.value.value.name not in standard_indexed):
# We found a getitem from the input array.
if stmt.value.op == 'getitem':
stmt_index_var = stmt.value.index
else:
stmt_index_var = stmt.value.index_var
# allow static_getitem since rewrite passes are applied
#raise ValueError("Unexpected static_getitem in add_indices_to_kernel.")
relatively_indexed.add(stmt.value.value.name)
# Store the index used after looking up the variable in
# the const dictionary.
if need_to_calc_kernel:
assert hasattr(stmt_index_var, 'name')
if stmt_index_var.name in tuple_table:
kernel_consts += [tuple_table[stmt_index_var.name]]
elif stmt_index_var.name in const_dict:
kernel_consts += [const_dict[stmt_index_var.name]]
else:
raise NumbaValueError("stencil kernel index is not "
"constant, 'neighborhood' option required")
if ndim == 1:
# Single dimension always has index variable 'index0'.
# tmpvar will hold the real index and is computed by
# adding the relative offset in stmt.value.index to
# the current absolute location in index0.
index_var = ir.Var(scope, index_names[0], loc)
tmpname = ir_utils.mk_unique_var("stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
stmt_index_var_typ = typemap[stmt_index_var.name]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(stmt_index_var_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
acc_call = ir.Expr.binop(operator.add, stmt_index_var,
index_var, loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
index_vars = []
sum_results = []
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
const_index_vars = []
ind_stencils = []
stmt_index_var_typ = typemap[stmt_index_var.name]
# Same idea as above but you have to extract
# individual elements out of the tuple indexing
# expression and add the corresponding index variable
# to them and then reconstitute as a tuple that can
# index the array.
for dim in range(ndim):
tmpname = ir_utils.mk_unique_var("const_index")
tmpvar = ir.Var(scope, tmpname, loc)
new_body.append(ir.Assign(ir.Const(dim, loc),
tmpvar, loc))
const_index_vars += [tmpvar]
index_var = ir.Var(scope, index_names[dim], loc)
index_vars += [index_var]
tmpname = ir_utils.mk_unique_var("ind_stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
ind_stencils += [tmpvar]
getitemname = ir_utils.mk_unique_var("getitem")
getitemvar = ir.Var(scope, getitemname, loc)
getitemcall = ir.Expr.getitem(stmt_index_var,
const_index_vars[dim], loc)
new_body.append(ir.Assign(getitemcall, getitemvar, loc))
# Get the type of this particular part of the index tuple.
if isinstance(stmt_index_var_typ, types.ConstSized):
one_index_typ = stmt_index_var_typ[dim]
else:
one_index_typ = stmt_index_var_typ[:]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(one_index_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
else:
acc_call = ir.Expr.binop(operator.add, getitemvar,
index_vars[dim], loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
tuple_call = ir.Expr.build_tuple(ind_stencils, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value,s_index_var,loc),
stmt.target,loc))
else:
new_body.append(stmt)
block.body = new_body
if need_to_calc_kernel:
# Find the size of the kernel by finding the maximum absolute value
# index used in the kernel specification.
neighborhood = [[0,0] for _ in range(ndim)]
if len(kernel_consts) == 0:
raise NumbaValueError("Stencil kernel with no accesses to "
"relatively indexed arrays.")
for index in kernel_consts:
if isinstance(index, tuple) or isinstance(index, list):
for i in range(len(index)):
te = index[i]
if isinstance(te, ir.Var) and te.name in const_dict:
te = const_dict[te.name]
if isinstance(te, int):
neighborhood[i][0] = min(neighborhood[i][0], te)
neighborhood[i][1] = max(neighborhood[i][1], te)
else:
raise NumbaValueError(
"stencil kernel index is not constant,"
"'neighborhood' option required")
index_len = len(index)
elif isinstance(index, int):
neighborhood[0][0] = min(neighborhood[0][0], index)
neighborhood[0][1] = max(neighborhood[0][1], index)
index_len = 1
else:
raise NumbaValueError(
"Non-tuple or non-integer used as stencil index.")
if index_len != ndim:
raise NumbaValueError(
"Stencil index does not match array dimensionality.")
return (neighborhood, relatively_indexed)
def get_return_type(self, argtys):
if config.DEBUG_ARRAY_OPT >= 1:
print("get_return_type", argtys)
ir_utils.dump_blocks(self.kernel_ir.blocks)
if not isinstance(argtys[0], types.npytypes.Array):
raise NumbaValueError("The first argument to a stencil kernel must "
"be the primary input array.")
from numba.core import typed_passes
typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(
self._typingctx,
self._targetctx,
self.kernel_ir,
argtys,
None,
{})
if isinstance(return_type, types.npytypes.Array):
raise NumbaValueError(
"Stencil kernel must return a scalar and not a numpy array.")
real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
argtys[0].layout)
return (real_ret, typemap, calltypes)
def _install_type(self, typingctx):
"""Constructs and installs a typing class for a StencilFunc object in
the input typing context.
"""
_ty_cls = type('StencilFuncTyping_' +
str(self.id),
(AbstractTemplate,),
dict(key=self, generic=self._type_me))
typingctx.insert_user_function(self, _ty_cls)
def compile_for_argtys(self, argtys, kwtys, return_type, sigret):
# look in the type cache to find if result array is passed
(_, result, typemap, calltypes) = self._type_cache[argtys]
new_func = self._stencil_wrapper(result, sigret, return_type,
typemap, calltypes, *argtys)
return new_func
def _type_me(self, argtys, kwtys):
"""
Implement AbstractTemplate.generic() for the typing class
built by StencilFunc._install_type().
Return the call-site signature.
"""
if (self.neighborhood is not None and
len(self.neighborhood) != argtys[0].ndim):
raise NumbaValueError("%d dimensional neighborhood specified "
"for %d dimensional input array" %
(len(self.neighborhood), argtys[0].ndim))
argtys_extra = argtys
sig_extra = ""
result = None
if 'out' in kwtys:
argtys_extra += (kwtys['out'],)
sig_extra += ", out=None"
result = kwtys['out']
if 'neighborhood' in kwtys:
argtys_extra += (kwtys['neighborhood'],)
sig_extra += ", neighborhood=None"
# look in the type cache first
if argtys_extra in self._type_cache:
(_sig, _, _, _) = self._type_cache[argtys_extra]
return _sig
(real_ret, typemap, calltypes) = self.get_return_type(argtys)
sig = signature(real_ret, *argtys_extra)
dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format(
",".join(self.kernel_ir.arg_names), sig_extra))
exec(dummy_text) in globals(), locals()
dummy_func = eval("__numba_dummy_stencil")
sig = sig.replace(pysig=utils.pysignature(dummy_func))
self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])
self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
return sig
def copy_ir_with_calltypes(self, ir, calltypes):
"""
Create a copy of a given IR along with its calltype information.
We need a copy of the calltypes because copy propagation applied
to the copied IR will change the calltypes and make subsequent
uses of the original IR invalid.
"""
copy_calltypes = {}
kernel_copy = ir.copy()
kernel_copy.blocks = {}
# For each block...
for (block_label, block) in ir.blocks.items():
new_block = copy.deepcopy(ir.blocks[block_label])
new_block.body = []
# For each statement in each block...
for stmt in ir.blocks[block_label].body:
# Copy the statement to the new copy of the kernel
# and if the original statement is in the original
# calltypes then add the type associated with this
# statement to the calltypes copy.
scopy = copy.deepcopy(stmt)
new_block.body.append(scopy)
if stmt in calltypes:
copy_calltypes[scopy] = calltypes[stmt]
kernel_copy.blocks[block_label] = new_block
return (kernel_copy, copy_calltypes)
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimensions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
    # 2) The body of the loop nest in this new function is a special sentinel
    #    assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
        # won't affect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks,
in_cps,
name_var_table,
typemap,
copy_calltypes)
if "out" in name_var_table:
raise NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT >= 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT >= 1:
print("_stencil_wrapper", return_type, return_type.dtype,
type(return_type.dtype), args)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i),
name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise NumbaValueError("The first argument to a stencil kernel must "
"use relative indexing, not standard indexing.")
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise NumbaValueError("Standard indexing requested for an array name "
"not present in the stencil kernel definition.")
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim,
self.neighborhood, standard_indexed, typemap, copy_calltypes)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT >= 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
index_vars, out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(stencil_func_name,
",".join(kernel_copy.arg_names), sig_extra)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# Converts cval to a string constant
def cval_as_str(cval):
if not np.isfinite(cval):
# See if this is a string-repr numerical const, issue #7286
if np.isnan(cval):
return "np.nan"
elif np.isinf(cval):
if cval < 0:
return "-np.inf"
else:
return "np.inf"
else:
return str(cval)
        # If we have to allocate the output array (the out argument was not used)
        # then use numpy.full if the user specified a cval stencil decorator option
        # or np.zeros if they didn't.
if result is None:
return_type_name = numpy_support.as_dtype(
return_type.dtype).type.__name__
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init ="{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval_as_str(cval),
return_type_name)
else:
out_init ="{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type_name)
func_text += " " + out_init
else: # result is present, if cval is set then use it
if "cval" in self.options:
cval = self.options["cval"]
cval_ty = typing.typeof.typeof(cval)
if not self._typingctx.can_convert(cval_ty, return_type.dtype):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init = "{}[:] = {}\n".format(out_name, cval_as_str(cval))
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
            # but minimums greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),"
"{}[{}]-max(0,{})):\n").format(
index_vars[i],
ranges[i][0],
shape_name,
i,
ranges[i][1])
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
from numba.core import compiler
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = ([sentinel_name, out_name, neighborhood_name,
shape_name] + kernel_copy.arg_names + index_vars)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift labels in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT >= 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
        # Search all the blocks in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if (isinstance( inst, ir.Assign) and
inst.target.name == sentinel_name):
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1:]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for (l, b) in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(
ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert(isinstance(the_array, types.Type))
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT >= 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{})
return new_func
def __call__(self, *args, **kwargs):
if (self.neighborhood is not None and
len(self.neighborhood) != args[0].ndim):
raise ValueError("{} dimensional neighborhood specified for {} "
"dimensional input array".format(
len(self.neighborhood), args[0].ndim))
if 'out' in kwargs:
result = kwargs['out']
rdtype = result.dtype
rttype = numpy_support.from_dtype(rdtype)
result_type = types.npytypes.Array(rttype, result.ndim,
numpy_support.map_layout(result))
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = tuple([typing.typeof.typeof(x) for x in args] +
[result_type])
else:
result = None
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = array_types
if config.DEBUG_ARRAY_OPT >= 1:
print("__call__", array_types, args, kwargs)
(real_ret, typemap, calltypes) = self.get_return_type(array_types)
new_func = self._stencil_wrapper(result, None, real_ret, typemap,
calltypes, *array_types_full)
if result is None:
return new_func.entry_point(*args)
else:
return new_func.entry_point(*(args+(result,)))
def stencil(func_or_mode='constant', **options):
# called on function without specifying mode style
if not isinstance(func_or_mode, str):
mode = 'constant' # default style
func = func_or_mode
else:
mode = func_or_mode
func = None
for option in options:
if option not in ["cval", "standard_indexing", "neighborhood"]:
raise ValueError("Unknown stencil option " + option)
wrapper = _stencil(mode, options)
if func is not None:
return wrapper(func)
return wrapper
def _stencil(mode, options):
if mode != 'constant':
raise ValueError("Unsupported mode style " + mode)
def decorated(func):
from numba.core import compiler
kernel_ir = compiler.run_frontend(func)
return StencilFunc(kernel_ir, mode, options)
return decorated
@lower_builtin(stencil)
def stencil_dummy_lower(context, builder, sig, args):
"lowering for dummy stencil calls"
return lir.Constant(lir.IntType(types.intp.bitwidth), 0)
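# A minimal usage sketch of the stencil decorator defined above; this is the
# standard Numba usage pattern.
import numpy as np
from numba import stencil

@stencil
def mean3(a):
    # 3-point moving average over the relative neighborhood [-1, 1]
    return (a[-1] + a[0] + a[1]) / 3

x = np.arange(10.0)
y = mean3(x)  # border elements stay at the default cval of 0.0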
| 47.849579
| 141
| 0.554133
| 4,589
| 39,763
| 4.605143
| 0.128786
| 0.011593
| 0.010789
| 0.009937
| 0.301472
| 0.238253
| 0.210997
| 0.166138
| 0.147447
| 0.118772
| 0
| 0.00355
| 0.369489
| 39,763
| 830
| 142
| 47.907229
| 0.839376
| 0.192968
| 0
| 0.254386
| 0
| 0
| 0.075153
| 0.006234
| 0
| 0
| 0
| 0
| 0.003509
| 1
| 0.033333
| false
| 0.007018
| 0.026316
| 0
| 0.101754
| 0.024561
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c30fc13cf631ce207921b9c3acc713c3fb36b5f
| 3,754
|
py
|
Python
|
examples/bicycle/bicycle_dynamics.py
|
lujieyang/irs_lqr
|
bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f
|
[
"MIT"
] | 6
|
2021-11-20T19:05:06.000Z
|
2022-01-31T00:10:41.000Z
|
examples/bicycle/bicycle_dynamics.py
|
lujieyang/irs_lqr
|
bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f
|
[
"MIT"
] | 10
|
2021-07-24T19:50:36.000Z
|
2021-11-20T19:06:40.000Z
|
examples/bicycle/bicycle_dynamics.py
|
lujieyang/irs_lqr
|
bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f
|
[
"MIT"
] | 1
|
2021-12-15T22:09:31.000Z
|
2021-12-15T22:09:31.000Z
|
import numpy as np
import pydrake.symbolic as ps
import torch
import time
from irs_lqr.dynamical_system import DynamicalSystem
class BicycleDynamics(DynamicalSystem):
def __init__(self, h):
super().__init__()
"""
x = [x pos, y pos, heading, speed, steering_angle]
u = [acceleration, steering_velocity]
"""
self.h = h
self.dim_x = 5
self.dim_u = 2
"""Jacobian computations"""
self.x_sym = np.array([ps.Variable("x_{}".format(i)) for i in range(self.dim_x)])
self.u_sym = np.array([ps.Variable("u_{}".format(i)) for i in range(self.dim_u)])
self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym)
self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym)))
def dynamics_sym(self, x, u):
"""
Symbolic expression for dynamics. Used to compute
linearizations of the system.
x (np.array, dim: n): state
u (np.array, dim: m): action
"""
heading = x[2]
v = x[3]
steer = x[4]
dxdt = np.array([
v * ps.cos(heading),
v * ps.sin(heading),
v * ps.tan(steer),
u[0],
u[1]
])
x_new = x + self.h * dxdt
return x_new
def dynamics(self, x, u):
"""
Numeric expression for dynamics.
x (np.array, dim: n): state
u (np.array, dim: m): action
"""
heading = x[2]
v = x[3]
steer = x[4]
dxdt = np.array([
v * np.cos(heading),
v * np.sin(heading),
v * np.tan(steer),
u[0],
u[1]
])
x_new = x + self.h * dxdt
return x_new
def dynamics_batch(self, x, u):
"""
Batch dynamics. Uses pytorch for
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
xnext (np.array, dim: B x n): batched next state
"""
heading = x[:,2]
v = x[:,3]
steer = x[:,4]
dxdt = np.vstack((
v * np.cos(heading),
v * np.sin(heading),
v * np.tan(steer),
u[:,0],
u[:,1]
)).transpose()
x_new = x + self.h * dxdt
return x_new
def dynamics_batch_torch(self, x, u):
"""
Batch dynamics. Uses pytorch for
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
xnext (np.array, dim: B x n): batched next state
"""
x = torch.Tensor(x).cuda()
u = torch.Tensor(u).cuda()
heading = x[:,2]
v = x[:,3]
steer = x[:,4]
dxdt = torch.vstack((
v * torch.cos(heading),
v * torch.sin(heading),
v * torch.tan(steer),
u[:,0],
u[:,1]
)).T
x_new = x + self.h * dxdt
return x_new
def jacobian_xu(self, x, u):
"""
        Recover the linearized dynamics df/d(xu) as a function of x, u
"""
env = {self.x_sym[i]: x[i] for i in range(self.dim_x)}
env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)})
f_x = ps.Evaluate(self.jacobian_xu_sym, env)
return f_x
def jacobian_xu_batch(self, x, u):
"""
        Recover the linearized dynamics df/d(xu) as a function of x, u
"""
dxdu_batch = np.zeros((
x.shape[0], x.shape[1], x.shape[1] + u.shape[1]))
for i in range(x.shape[0]):
dxdu_batch[i] = self.jacobian_xu(x[i], u[i])
return dxdu_batch
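# Minimal usage sketch (assumes pydrake is installed; the torch variant
# additionally needs a CUDA-capable GPU).
dynamics = BicycleDynamics(h=0.1)
x0 = np.array([0.0, 0.0, 0.0, 1.0, 0.0])  # [x, y, heading, speed, steer]
u0 = np.array([0.5, 0.1])                 # [acceleration, steering velocity]
x1 = dynamics.dynamics(x0, u0)            # one explicit-Euler step
J = dynamics.jacobian_xu(x0, u0)          # 5 x 7 Jacobian [df/dx, df/du]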
| 28.225564
| 91
| 0.483751
| 522
| 3,754
| 3.371648
| 0.191571
| 0.055682
| 0.056818
| 0.0375
| 0.536932
| 0.514205
| 0.435795
| 0.435795
| 0.383523
| 0.383523
| 0
| 0.011593
| 0.379595
| 3,754
| 132
| 92
| 28.439394
| 0.744096
| 0.195258
| 0
| 0.475
| 0
| 0
| 0.003059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0875
| false
| 0
| 0.0625
| 0
| 0.2375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c32dcda5e8a9e2b82a81dd52550421a3c5cdcea
| 13,265
|
py
|
Python
|
samples/COVServer.py
|
noelli/bacpypes
|
c2f4d753ed86bc0357823e718e7ff16c05f06850
|
[
"MIT"
] | null | null | null |
samples/COVServer.py
|
noelli/bacpypes
|
c2f4d753ed86bc0357823e718e7ff16c05f06850
|
[
"MIT"
] | null | null | null |
samples/COVServer.py
|
noelli/bacpypes
|
c2f4d753ed86bc0357823e718e7ff16c05f06850
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
This sample application is a server that supports COV notification services.
The console accepts commands that change the properties of an object that
triggers the notifications.
"""
import time
from threading import Thread
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.task import RecurringTask
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import AnalogValueObject, BinaryValueObject
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.cov import ChangeOfValueServices
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# test globals
test_av = None
test_bv = None
test_application = None
#
# SubscribeCOVApplication
#
@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
pass
#
# COVConsoleCmd
#
@bacpypes_debugging
class COVConsoleCmd(ConsoleCmd):
def do_status(self, args):
"""status"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_status %r", args)
global test_application
# dump from the COV detections dict
for obj_ref, cov_detection in test_application.cov_detections.items():
print("{} {}".format(obj_ref.objectIdentifier, obj_ref))
for cov_subscription in cov_detection.cov_subscriptions:
print(" {} proc_id={} confirmed={} lifetime={}".format(
cov_subscription.client_addr,
cov_subscription.proc_id,
cov_subscription.confirmed,
cov_subscription.lifetime,
))
def do_trigger(self, args):
"""trigger object_name"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_trigger %r", args)
global test_application
if not args:
print("object name required")
return
obj = test_application.get_object_name(args[0])
if not obj:
print("no such object")
return
# get the detection algorithm object
cov_detection = test_application.cov_detections.get(obj, None)
if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0):
print("no subscriptions for that object")
return
# tell it to send out notifications
cov_detection.send_cov_notifications()
def do_set(self, args):
"""set object_name [ . ] property_name [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# change the value
setattr(obj, property_name, value)
except IndexError:
print(COVConsoleCmd.do_set.__doc__)
except Exception as err:
print("exception: %s" % (err,))
def do_write(self, args):
"""write object_name [ . ] property [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# pass it along
obj.WriteProperty(property_name, value)
except IndexError:
print(COVConsoleCmd.do_write.__doc__)
except Exception as err:
print("exception: %s" % (err,))
@bacpypes_debugging
class TestAnalogValueTask(RecurringTask):
"""
An instance of this class is created when '--avtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# make a list of test values
self.test_values = list(float(i * 10) for i in range(10))
def process_task(self):
if _debug: TestAnalogValueTask._debug("process_task")
global test_av
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueTask._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
@bacpypes_debugging
class TestAnalogValueThread(Thread):
"""
An instance of this class is created when '--avthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = list(100.0 + float(i * 10) for i in range(10))
def run(self):
if _debug: TestAnalogValueThread._debug("run")
global test_av
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueThread._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
# sleep
time.sleep(self.interval)
@bacpypes_debugging
class TestBinaryValueTask(RecurringTask):
"""
An instance of this class is created when '--bvtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def process_task(self):
if _debug: TestBinaryValueTask._debug("process_task")
global test_bv
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueTask._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
@bacpypes_debugging
class TestBinaryValueThread(RecurringTask, Thread):
"""
An instance of this class is created when '--bvthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def run(self):
if _debug: TestBinaryValueThread._debug("run")
global test_bv
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueThread._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
# sleep
time.sleep(self.interval)
def main():
global test_av, test_bv, test_application
# make a parser
parser = ConfigArgumentParser(description=__doc__)
parser.add_argument("--console",
action="store_true",
default=False,
help="create a console",
)
# analog value task and thread
parser.add_argument("--avtask", type=float,
help="analog value recurring task",
)
parser.add_argument("--avthread", type=float,
help="analog value thread",
)
# analog value task and thread
parser.add_argument("--bvtask", type=float,
help="binary value recurring task",
)
parser.add_argument("--bvthread", type=float,
help="binary value thread",
)
# provide a different spin value
parser.add_argument("--spin", type=float,
help="spin time",
default=1.0,
)
# parse the command line arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug: _log.debug(" - this_device: %r", this_device)
# make a sample application
test_application = SubscribeCOVApplication(this_device, args.ini.address)
# make an analog value object
test_av = AnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='av',
presentValue=0.0,
statusFlags=[0, 0, 0, 0],
covIncrement=1.0,
)
_log.debug(" - test_av: %r", test_av)
# add it to the device
test_application.add_object(test_av)
_log.debug(" - object list: %r", this_device.objectList)
# make a binary value object
test_bv = BinaryValueObject(
objectIdentifier=('binaryValue', 1),
objectName='bv',
presentValue='inactive',
statusFlags=[0, 0, 0, 0],
)
_log.debug(" - test_bv: %r", test_bv)
# add it to the device
test_application.add_object(test_bv)
# make a console
if args.console:
test_console = COVConsoleCmd()
_log.debug(" - test_console: %r", test_console)
# enabling sleeping helps when running threads
enable_sleeping()
# analog value task
if args.avtask:
test_av_task = TestAnalogValueTask(args.avtask)
test_av_task.install_task()
# analog value thread
if args.avthread:
test_av_thread = TestAnalogValueThread(args.avthread)
deferred(test_av_thread.start)
# binary value task
if args.bvtask:
test_bv_task = TestBinaryValueTask(args.bvtask)
test_bv_task.install_task()
# binary value thread
if args.bvthread:
test_bv_thread = TestBinaryValueThread(args.bvthread)
deferred(test_bv_thread.start)
_log.debug("running")
run(args.spin)
_log.debug("fini")
if __name__ == "__main__":
main()
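# --- illustrative sketch (not part of the original sample) ----------------
# Any writable point can be driven the way the helpers above drive test_av
# and test_bv: subclass RecurringTask, cycle through a list of values, and
# write to presentValue.  The class and names below are hypothetical,
# assuming only the imports already used in this sample.
class ExamplePointTask(RecurringTask):
    def __init__(self, point, values, interval):
        # RecurringTask takes its interval in milliseconds
        RecurringTask.__init__(self, interval * 1000)
        self.point = point
        self.values = list(values)

    def process_task(self):
        # rotate the value list and write the next value to the point
        next_value = self.values.pop(0)
        self.values.append(next_value)
        self.point.presentValue = next_value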
| 30.354691
| 87
| 0.618168
| 1,507
| 13,265
| 5.213006
| 0.147313
| 0.029404
| 0.045825
| 0.057281
| 0.556263
| 0.514511
| 0.490453
| 0.490453
| 0.44972
| 0.39804
| 0
| 0.00562
| 0.289031
| 13,265
| 436
| 88
| 30.424312
| 0.827378
| 0.162307
| 0
| 0.46748
| 0
| 0
| 0.096296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052846
| false
| 0.004065
| 0.044715
| 0
| 0.134146
| 0.036585
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c330026016ced54e01a326234695f3fe1fb584f
| 5,187
|
py
|
Python
|
fancylit/modeling/yellowbrick_funcs.py
|
rubyruins/fancylit
|
56a7cdfe78edd687a3b318bbbfa534203de1ace8
|
[
"Apache-2.0"
] | null | null | null |
fancylit/modeling/yellowbrick_funcs.py
|
rubyruins/fancylit
|
56a7cdfe78edd687a3b318bbbfa534203de1ace8
|
[
"Apache-2.0"
] | null | null | null |
fancylit/modeling/yellowbrick_funcs.py
|
rubyruins/fancylit
|
56a7cdfe78edd687a3b318bbbfa534203de1ace8
|
[
"Apache-2.0"
] | null | null | null |
import random
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import classification_report
from yellowbrick.target import FeatureCorrelation
from yellowbrick.target import ClassBalance
from streamlit_yellowbrick import st_yellowbrick
from typing import Any, List, Tuple
import plotly.express as px
def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]:
"""
Purpose:
Prep data for modeling
Args:
df - Pandas dataframe
Returns:
test_features - test set features
train_features - train set features
test_target - test set target
train_target - train set target
"""
# Specify the target classes
target_string = st.selectbox("Select Target Column", df.columns)
target = np.array(df[target_string])
# Select Features you want
feature_cols = st.multiselect("Select Modeling Features", df.columns)
# Get all features
features = df[feature_cols]
featurestmp = np.array(features)
feats = []
# find all bad rows
for index, featarr in enumerate(featurestmp):
try:
featarr = featarr.astype(float)
feats.append(featarr)
except Exception as error:
st.error(error)
st.error(featarr)
st.stop()
featuresarr = np.array(feats)
# Split the data: train_test_split returns (train, test) pairs, so with
# test_size=0.75 the first value unpacked below is the 25% slice; the
# swapped names still yield an effective 75/25 train/test split downstream
randInt = random.randint(1, 200)
(
test_features,
train_features,
test_target,
train_target,
) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt)
return (
test_features,
train_features,
test_target,
train_target,
)
def show_classification_report(
df: pd.DataFrame,
) -> None:
"""
Purpose:
Renders a classification_report
Args:
df - Pandas dataframe
Returns:
N/A
"""
# Prep data for model training
(
test_features,
train_features,
test_target,
train_target,
) = data_prep(df)
if st.button("Train Model"):
st.header("Classification Report")
st.markdown(
"The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports."
)
# Instantiate the visualizer
visualizer = classification_report(
GaussianNB(),
train_features,
train_target,
test_features,
test_target,
support=True,
)
# Get the viz
fig = visualizer.fig
ax = visualizer.show()
fig.axes.append(ax)
# show the viz
st.write(fig)
# TODO download model, Download report
# TODO live predictions
def feature_correlation(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a feature correlation graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
target_string = st.selectbox("Select Target Column", df.columns,
key="selectbox-feature-correlation")
residual_cols = [col for col in df.columns if col != target_string and df[col].dtype != "object"]
feature_cols = st.multiselect("Select Modeling Features", residual_cols,
key="multiselect-feature-correlation",
default=residual_cols[:5])
if str(df[target_string].dtype) == "object":
method = 'mutual_info-classification'
else:
type_problem = st.selectbox("Select the type of problem",
['classification', 'regression'])
if type_problem == 'classification':
method = st.selectbox("Select the correlation method",
['mutual_info-classification', 'pearson'])
else:
method = st.selectbox("Select the correlation method",
['mutual_info-regression', 'pearson'])
try:
viz = FeatureCorrelation(method=method,
feature_names=feature_cols,
sort=True)
viz.fit(df[feature_cols], df[target_string])
fig = px.bar(x=viz.scores_, y=viz.features_, title="Feature Correlation")
st.plotly_chart(fig)
except Exception:
st.warning("Verify the type of problem you selected")
def class_balance(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a class balance graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
classes = st.selectbox("Select Class Column", df.columns, index=len(df.columns) - 1)
visualizer = ClassBalance(labels=df[classes].unique())
visualizer.fit(df[classes])
st_yellowbrick(visualizer)
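# --- illustrative sketch (not part of the original module) ----------------
# Assuming this file is run inside a Streamlit app, the three renderers
# above could be wired together as below (the CSV path is hypothetical).
if __name__ == "__main__":
    example_df = pd.read_csv("data.csv")  # hypothetical dataset path
    show_classification_report(example_df)
    feature_correlation(example_df)
    class_balance(example_df)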
| 30.511765
| 389
| 0.614035
| 571
| 5,187
| 5.467601
| 0.302977
| 0.038437
| 0.032671
| 0.026906
| 0.211403
| 0.202434
| 0.202434
| 0.132607
| 0.065983
| 0
| 0
| 0.003876
| 0.303644
| 5,187
| 170
| 390
| 30.511765
| 0.860465
| 0.144207
| 0
| 0.222222
| 0
| 0.010101
| 0.200374
| 0.03133
| 0
| 0
| 0
| 0.005882
| 0
| 1
| 0.040404
| false
| 0
| 0.121212
| 0
| 0.171717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c33dde47e4450a45e6aa5280d3a4d98189d8d33
| 14,566
|
py
|
Python
|
info/modules/admin/views.py
|
moonbria/test1
|
05893bd91d416ca4093e4619ede427434fa665cc
|
[
"MIT"
] | null | null | null |
info/modules/admin/views.py
|
moonbria/test1
|
05893bd91d416ca4093e4619ede427434fa665cc
|
[
"MIT"
] | null | null | null |
info/modules/admin/views.py
|
moonbria/test1
|
05893bd91d416ca4093e4619ede427434fa665cc
|
[
"MIT"
] | null | null | null |
import random
import re
from flask import current_app, jsonify
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
import time
from info import constants, db
from info import redis_store
from info.lib.yuntongxun.sms import CCP
from info.utils.captcha.captcha import captcha
from info.utils.image_storage import storage
from info.utils.response_code import RET
from info.modules.passport import passport_blu
from info.models import User, Category, News
from info.modules.profile import profile_blu
from info.utils.common import user_login_data
from datetime import datetime, timedelta
from . import admin_blu
@admin_blu.route("/login", methods=["GET", "POST"])
def admin_login():
if request.method == "GET":
# get the specified values from the session
user_id = session.get("user_id", None)
is_admin = session.get("is_admin", False)
if user_id and is_admin:
return redirect(url_for("admin_index"))
return render_template("admin/login.html")
# get the login parameters
username = request.form.get("username")
password = request.form.get("password")
if not all([username, password]):
return render_template("admin/login.html", errmsg="参数错误")
try:
user = User.query.filter(User.mobile == username).first()
except Exception as e:
current_app.logger.error(e)
return render_template("admin/login.html", errmsg="数据错误")
if not user:
return render_template("admin/login.html", errmsg="用户名错误")
if not user.check_password(password):
return render_template("admin/login.html", errmsg="密码错误")
if not user.is_admin:
return render_template("admin/login.html", errmsg="用户不是管理员")
session["user_id"] = user.id
session["nick_name"] = user.nick_name
session["mobile"] = user.mobile
session["is_admin"] = True
# redirect to the admin home page (not yet implemented)
return redirect(url_for("admin.admin_index"))
@admin_blu.route("/index")
@user_login_data
def admin_index():
user = g.user
return render_template("admin/index.html", user=user.to_dict())
@admin_blu.before_request
def before_request():
# if this request is not for the login page
if not request.url.endswith(url_for("admin.admin_login")):
user_id = session.get("user_id")
is_admin = session.get("is_admin", False)
if not user_id or not is_admin:
# check whether a user is logged in and is an administrator; if not, redirect to the site home page
return redirect("/")
@admin_blu.route("/user_count")
def user_count():
# query the total number of (non-admin) users
total_count = 0
try:
total_count = User.query.filter(User.is_admin == False).count()
except Exception as e:
current_app.logger.error(e)
# query the number of users added this month
mon_count = 0
try:
now = time.localtime()
mon_begin = "%d-%02d-01" % (now.tm_year, now.tm_mon)
mon_begin_date = datetime.strptime(mon_begin, "%Y-%m-%d")
mon_count = User.query.filter(User.is_admin==False,
User.create_time > mon_begin_date).count()
except Exception as e:
current_app.logger.error(e)
day_count = 0
try:
day_begin = "%d-%02d-%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
day_begin_date = datetime.strptime(day_begin, "%Y-%m-%d")
day_count = User.query.filter(User.is_admin==False,
User.create_time >= day_begin_date).count()
except Exception as e:
current_app.logger.error(e)
# query the chart data
# get today's 00:00:00 timestamp
now_date = datetime.strptime(datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
print(now_date)
# define empty lists to hold the data
active_date = list()
active_count = list()
# collect the data day by day, then reverse
for i in range(0, 31):
begin_date = now_date - timedelta(days=i)
end_date = now_date - timedelta(days=(i - 1))
active_date.append(begin_date.strftime("%Y-%m-%d"))
count = 0
try:
count = User.query.filter(User.is_admin == False,
User.last_login >= begin_date,
User.last_login < end_date).count()
print(count)
except Exception as e:
current_app.logger.error(e)
active_count.append(count)
active_date.reverse()
active_count.reverse()
data = {"total_count": total_count, "mon_count": mon_count, "day_count": day_count,
"active_date": active_date, "active_count": active_count}
return render_template("admin/user_count.html", data=data)
@admin_blu.route("/user_list")
def user_list():
"""获取用户列表"""
# 获取参数
page = request.args.get("p", 1)
try:
print(page)
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
# set default values for the variables
users = []
current_page = 1
total_page = 1
# query the data
try:
paginate = User.query.filter(User.is_admin == False)\
.order_by(User.last_login.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
users = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
# convert the model objects into a list of dicts
users_list = []
for user in users:
users_list.append(user.to_admin_dict())
context = {
"total_page": total_page,
"current_page": current_page,
"users": users_list
}
return render_template("admin/user_list.html", data=context)
@admin_blu.route("/news_review")
def news_review():
"""返回待审核新闻列表"""
page = request.args.get("p", 1)
keywords = request.args.get("keywords", "")
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
news_list = list()
current_page = 1
total_page = 1
try:
filters = [News.status != 0]
# if a keyword was given
if keywords:
# add a keyword search filter
filters.append(News.title.contains(keywords))
paginate = News.query.filter(*filters)\
.order_by(News.create_time.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
news_list = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
news_dict_list = list()
for news in news_list:
news_dict_list.append(news.to_review_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"news_list": news_dict_list
}
return render_template("admin/news_review.html", data=data)
@admin_blu.route("/news_review_detail", methods=["GET", "POST"])
def news_review_detail():
"""新闻审核"""
# 获取新闻id
if request.method == "GET":
news_id = request.args.get("news_id")
if not news_id:
data = {
"errmsg": "未查询到数据"
}
return render_template("admin/news_review_detail.html", data=data)
# query the news item by id
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
data = {
"errmsg": "未查询到数据"
}
return render_template("admin/news_review_detail.html", data=data)
# return the data
data = {
"news": news.to_dict()
}
return render_template("admin/news_review_detail.html", data=data)
# perform the review action
# 1. get the parameters
news_id = request.json.get("news_id")
action = request.json.get("action")
# 2. validate the parameters
if not all([news_id, action]):
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
if action not in ("accept", "reject"):
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
news = None
try:
# 3. query the news item
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
return jsonify(errno=RET.NODATA, errmsg="未查询到数据")
if action == "accept":
news.status = 0
else:
# rejected: a reason is required
reason = request.json.get("reason")
if not reason:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
news.reason = reason
news.status = -1
# commit to the database
try:
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
return jsonify(errno=RET.OK, errmsg="操作成功")
@admin_blu.route("/news_edit", methods=["GET", "POST"])
def news_edit():
"""返回新闻列表"""
page = request.args.get("p", "1")
print(page)
a = re.match(r"^\d*", page)
b = re.findall(r"""keywords=(\w*)""", page)
print(b)
page = a.group()
if b != []:
b = b[0]
keywords = b
else:
keywords = None
b = ""
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
news_list = list()
current_page = 1
total_page = 1
try:
filters = list()
# if a keyword was given
if keywords:
# add a keyword search filter
filters.append(News.title.contains(keywords))
# run the query
paginate = News.query.filter(*filters)\
.order_by(News.create_time.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
news_list = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
news_dict_list = list()
for news in news_list:
news_dict_list.append(news.to_basic_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"new_list": news_dict_list,
"last_input": b
}
if request.method == "GET":
return render_template("admin/news_edit.html", data=data)
# return jsonify(errno=RET.OK, errmsg="OK")
return render_template("admin/news_edit.html", data=data)
@admin_blu.route("/news_edit_detail", methods=["GET", "POST"])
def news_edit_detail():
"""新闻编辑详情"""
if request.method == "GET":
# get the parameters
news_id = request.args.get("news_id")
if not news_id:
data = {
"errmsg": "没有找到新闻"
}
return render_template("admin/news_edit_detail.html", data=data)
# query the news item
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
data = {
"errmsg": "没有找到新闻"
}
return render_template("admin/news_edit_detail.html", data=data)
categories = Category.query.all()
categories_li = []
for category in categories:
c_dict = category.to_dict()
c_dict["is_selected"] = False
if category.id == news.category_id:
c_dict["is_selected"] = True
categories_li.append(c_dict)
# remove the "latest" category
categories_li.pop(0)
data = {
"news": news.to_dict(),
"categories": categories_li
}
return render_template("admin/news_edit_detail.html", data=data)
news_id = request.form.get("news_id")
title = request.form.get("title")
digest = request.form.get("digest")
content = request.form.get("content")
index_image = request.files.get("index-image")
category_id = request.form.get("category_id")
# 1.1 check that the required fields have values
if not all([title, digest, content, category_id]):
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
print(title, digest, content, category_id)
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
return jsonify(errno=RET.NODATA, errmsg="未找到新闻数据")
# 1.2 try to read the image
if index_image:
try:
index_image = index_image.read()
except Exception as e:
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
# 2. upload the title image to Qiniu
try:
key = storage(index_image)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
# 3. set the related fields
news.title = title
news.digest = digest
news.content = content
news.category_id = category_id
# 4. commit to the database
try:
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
# 5. return the result
return jsonify(errno=RET.OK, errmsg="编辑成功")
@admin_blu.route("/news_category")
def get_news_category():
# get all category data
categories = Category.query.all()
# define a list to hold the category data
categories_dicts = []
for category in categories:
# get the dict for this category
cate_dict = category.to_dict()
# append it to the list
categories_dicts.append(cate_dict)
# drop the first ("latest") category
categories_dicts.pop(0)
# return the data
data = {
"categories": categories_dicts
}
return render_template("admin/news_type.html", data=data)
@admin_blu.route("/add_category", methods=["POST"])
def add_category():
"""修改或者添加分类"""
category_id = request.json.get("id")
category_name = request.json.get("name")
print(category_name)
if not category_name:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
# check whether a category id was given
if category_id:
try:
category = Category.query.get(category_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not category:
return jsonify(errno=RET.NODATA, errmsg="未查询到分类信息")
category.name = category_name
return jsonify(errno=RET.OK, errmsg="保存数据成功")
else:
# no category id: add a new category
try:
new_category = Category()
new_category.id = category_id
new_category.name = category_name
db.session.add(new_category)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
return jsonify(errno=RET.OK, errmsg="保存数据成功")
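# --- illustrative sketch (not part of the original module) ----------------
# The review/edit views above repeat the same "query one News row, log on
# error" pattern.  A hypothetical helper following that pattern:
def _get_news_or_none(news_id):
    """Return the News row for news_id, or None on error or miss."""
    try:
        return News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
        return None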
| 28.729783
| 87
| 0.604696
| 1,823
| 14,566
| 4.653319
| 0.143719
| 0.024755
| 0.042084
| 0.04456
| 0.532712
| 0.492514
| 0.420134
| 0.374396
| 0.336673
| 0.321349
| 0
| 0.004849
| 0.277976
| 14,566
| 506
| 88
| 28.786561
| 0.80175
| 0.035631
| 0
| 0.463351
| 0
| 0
| 0.089477
| 0.015104
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028796
| false
| 0.010471
| 0.062827
| 0
| 0.196335
| 0.018325
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c3438c0b1046ec22f1ab42437a0d08677dfe6f2
| 2,839
|
py
|
Python
|
src/predict_model.py
|
Swati17293/outlet-prediction
|
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
|
[
"MIT"
] | 1
|
2020-10-28T00:05:31.000Z
|
2020-10-28T00:05:31.000Z
|
src/predict_model.py
|
Swati17293/outlet-prediction
|
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
|
[
"MIT"
] | null | null | null |
src/predict_model.py
|
Swati17293/outlet-prediction
|
3c1f41b88d71b5247763bacc9dbc1abf5d0619a2
|
[
"MIT"
] | 1
|
2021-12-09T14:36:54.000Z
|
2021-12-09T14:36:54.000Z
|
#Answer Generation
import csv
import os
import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text
def load_model():
print('\nLoading model...')
# load json and create model
json_file = open('models/MODEL.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
gate_model = model_from_json(loaded_model_json)
# load weights into new model
gate_model.load_weights('models/MODEL.h5', by_name=True)
return gate_model
train_ans, anslist = [], []
def ans_vec():
anslist = []
dataset = ['Train']
for data in dataset:
f = open('data/raw/' + data + '.csv')
lines = csv.reader(f)
for line in lines:
source_uri = line[4]
anslist.append(source_uri)
f.close()
return anslist
def generate_save_ans():
dic = 3
anslist = ans_vec()
gate_model = load_model()
test_title_feature = np.load('data/vectorized/Test_title.npy')
test_summary_feature = np.load('data/vectorized/Test_summary.npy')
tokenizer_a = text.Tokenizer(num_words=dic+1)
tokenizer_a.fit_on_texts(anslist)
dic_a = tokenizer_a.word_index
ind_a = {value: key for key, value in dic_a.items()}
num_test = len(open('data/raw/Test.csv', 'r').readlines())
ans = gate_model.predict([ test_title_feature, test_summary_feature])
fp = open('reports/Test.ans', 'w')
for i in range(num_test):
if np.argmax(ans[i][0],axis=0) == 0:
fp.write('indiatimes\n') #Low frequency words are replaced with "indiatimes"
else:
for j in range(dic):
an = np.argmax(ans[i][j],axis=0)
if j != dic-1:
anext = np.argmax(ans[i][j+1],axis=0)
if an != 0 and anext != 0: #Words before and after
if an == anext:
fp.write('') #Delete duplicate words
else:
fp.write(ind_a[an] + ' ')
elif an != 0 and anext == 0:
fp.write(ind_a[an])
elif an == 0 and anext != 0:
fp.write(ind_a[anext])
else:
fp.write('')
else:
if an != 0:
fp.write(ind_a[an] + '\n')
else:
fp.write('\n')
fp.close()
def main():
load_model()
print('\n\nGenerating answers...')
if not os.path.exists('reports'):
os.mkdir('reports')
if not os.path.isfile('reports/Test.ans'):
generate_save_ans()
print('\nAnswer generation complete...\n\n')
if __name__ == "__main__":
main()
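# --- illustrative sketch (not part of the original script) ----------------
# generate_save_ans() decodes predictions by inverting the tokenizer's word
# index.  A hypothetical stand-alone illustration of that inverse mapping:
def example_inverse_mapping():
    """Toy demo of the index-to-word decoding used in generate_save_ans()."""
    tok = text.Tokenizer(num_words=4)
    tok.fit_on_texts(["times of india", "india today"])
    inverse = {value: key for key, value in tok.word_index.items()}
    return inverse[1]  # the most frequent word, here "india"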
| 27.038095
| 89
| 0.534343
| 364
| 2,839
| 3.997253
| 0.321429
| 0.038488
| 0.021993
| 0.030241
| 0.125773
| 0.099656
| 0.047423
| 0.047423
| 0.047423
| 0.047423
| 0
| 0.009605
| 0.339908
| 2,839
| 105
| 90
| 27.038095
| 0.766809
| 0.058471
| 0
| 0.093333
| 0
| 0
| 0.105322
| 0.023238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.08
| 0
| 0.16
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c353955c991e91d2a8ac820fc6be7fa23bb7348
| 716
|
py
|
Python
|
tools/client.py
|
Alisa1114/yolov4-pytorch-1
|
5dd8768f2eef868c9ee4588818350d4e1b50b98f
|
[
"MIT"
] | null | null | null |
tools/client.py
|
Alisa1114/yolov4-pytorch-1
|
5dd8768f2eef868c9ee4588818350d4e1b50b98f
|
[
"MIT"
] | null | null | null |
tools/client.py
|
Alisa1114/yolov4-pytorch-1
|
5dd8768f2eef868c9ee4588818350d4e1b50b98f
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from socket import *
def client():
# lab computer
# serverip = '120.126.151.182'
# serverport = 8887
# testing on the local machine
serverip = '127.0.0.1'
serverport = 8888
client = socket(AF_INET, SOCK_STREAM)
client.connect((serverip, serverport))
address_file = open('tools/address.txt', 'r')
address = address_file.read()
client.send(address.encode())
print(client.recv(1024).decode())
if __name__=='__main__':
client()
# buffer='POST /post HTTP/1.1\r\n'
# buffer+='Content-Type:application/json\r\n'
# buffer+='Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
# buffer+='Address : ' + address + '\r\n'
# buffer+='\r\n'
# print(buffer)
# message = "National Taipei University is number one in the world :)"
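# --- illustrative sketch (not part of the original file) ------------------
# A minimal counterpart server (hypothetical) that this client could talk
# to, following the same socket conventions:
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   server = socket(AF_INET, SOCK_STREAM)
#   server.bind(('127.0.0.1', 8888))
#   server.listen(1)
#   conn, addr = server.accept()
#   print(conn.recv(1024).decode())
#   conn.send('OK'.encode())
#   conn.close()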
| 25.571429
| 64
| 0.624302
| 94
| 716
| 4.62766
| 0.617021
| 0.022989
| 0.073563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070588
| 0.168994
| 716
| 28
| 65
| 25.571429
| 0.660504
| 0.432961
| 0
| 0
| 0
| 0
| 0.088832
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c35e02888592e1186585689132cd3d10b0f4a6d
| 13,039
|
py
|
Python
|
dapy/models/kuramoto_sivashinsky.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 11
|
2020-07-29T07:46:39.000Z
|
2022-03-17T01:28:07.000Z
|
dapy/models/kuramoto_sivashinsky.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 1
|
2020-07-14T11:49:17.000Z
|
2020-07-29T07:43:22.000Z
|
dapy/models/kuramoto_sivashinsky.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 10
|
2020-07-14T11:34:24.000Z
|
2022-03-07T09:08:12.000Z
|
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts.
Based on the Kuramoto--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress of Theoretical Physics, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations.
Acta Astronautica, 4 (1977) pp. 1177–1206.
"""
from typing import Union, Optional, Sequence, Callable
import numpy as np
from dapy.models.base import AbstractDiagonalGaussianModel
from dapy.models.spatial import SpatiallyExtendedModelMixIn
from dapy.integrators.etdrk4 import FourierETDRK4Integrator
from dapy.models.transforms import (
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
fft,
real_array_to_rfft_coeff,
rfft_coeff_to_real_array,
)
class FourierLaminarFlameModel(AbstractDiagonalGaussianModel):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its Fourier coefficients rather
than values of the state field at the spatial mesh points.
Based on the Kuramoto--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
The governing stochastic partial differential equation (SPDE) is
dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ * X) dt + κ ⊛ dW
where `s` is the spatial coordinate in a periodic domain `[0, S)`, `t` the time
coordinate, `X(s, t)` the state field process, `γ` a coefficient controlling the
degree of damping in the dynamics, `W(s, t)` a space-time white noise process,
`κ(s)` a spatial smoothing kernel and `⊛` indicates circular convolution in the
spatial coordinate.
Using a spectral spatial discretisation, this corresponds to a non-linear system of
stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ
dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ
where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of
the smoothing kernel `κ`, `ωₖ = 2 * pi * k / S` the kth spatial frequency and `i`
the imaginary unit.
A Fourier-domain exponential time-differencing integrator with 4th order Runge--
Kutta updates for non-linear terms [3, 4] is used to integrate the deterministic
component of the SDE dynamics and an Euler-Maruyama discretisation used for the
Wiener process increment.
The smoothing kernel Fourier coefficients are assumed to be
κ̃ₖ = σ * exp(-ωₖ² * ℓ² / 2) * √(M / S)
where `σ` is a parameter controlling the amplitude and `ℓ` a parameter controlling
the length scale.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress of Theoretical Physics, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations. Acta Astronautica, 4 (1977)
pp. 1177–1206.
3. Kassam, Aly-Khan and Trefethen, Lloyd N.
Fourth-order time-stepping for stiff PDEs.
SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.
4. Cox, Steven M. and Matthews, Paul C.
Exponential time differencing for stiff systems.
Journal of Computational Physics 176.2 (2002): 430-455.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
**kwargs
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
assert dim_state % 2 == 0, "State dimension `dim_state` must be even"
self.time_step = time_step
self.observation_space_indices = observation_space_indices
self.observation_function = observation_function
spatial_freqs = np.arange(dim_state // 2 + 1) * 2 * np.pi / domain_extent
spatial_freqs_sq = spatial_freqs ** 2
spatial_freqs[dim_state // 2] = 0
state_noise_kernel = (
(time_step) ** 0.5
* state_noise_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
state_noise_std = rfft_coeff_to_real_array(
state_noise_kernel + 1j * state_noise_kernel, False
)
initial_state_kernel = (
initial_state_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
initial_state_std = rfft_coeff_to_real_array(
initial_state_kernel + 1j * initial_state_kernel, False
)
def linear_operator(freqs, freqs_sq):
return freqs_sq - freqs_sq ** 2 - damping_coeff
def nonlinear_operator(v, freqs, freqs_sq):
return (
-0.5j * freqs * fft.rfft(fft.irfft(v, norm="ortho") ** 2, norm="ortho")
)
self.integrator = FourierETDRK4Integrator(
linear_operator=linear_operator,
nonlinear_operator=nonlinear_operator,
num_mesh_point=dim_state,
domain_size=domain_extent,
time_step=time_step,
num_roots_of_unity=num_roots_of_unity_etdrk4_integrator,
)
if observation_function is None:
dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0]
else:
dim_observation = observation_function(
np.zeros(dim_state)[observation_space_indices], 0
).shape[0]
super().__init__(
dim_state=dim_state,
dim_observation=dim_observation,
initial_state_std=initial_state_std,
initial_state_mean=np.zeros(dim_state),
state_noise_std=state_noise_std,
observation_noise_std=observation_noise_std,
**kwargs
)
def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray:
return rfft_coeff_to_real_array(
self.integrator.step(real_array_to_rfft_coeff(states))
)
def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray:
subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm="ortho")[
..., self.observation_space_indices
]
if self.observation_function is None:
return subsampled_states
else:
return self.observation_function(subsampled_states, t)
class SpatialLaminarFlameModel(
SpatiallyExtendedModelMixIn,
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
FourierLaminarFlameModel,
):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its values at the spatial mesh points
rather than the corresponding Fourier coefficients. For more details see the
docstring of `FourierLaminarFlameModel`.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
super().__init__(
dim_state=dim_state,
observation_space_indices=observation_space_indices,
observation_function=observation_function,
time_step=time_step,
domain_extent=domain_extent,
damping_coeff=damping_coeff,
observation_noise_std=observation_noise_std,
initial_state_amplitude=initial_state_amplitude,
state_noise_amplitude=state_noise_amplitude,
state_noise_length_scale=state_noise_length_scale,
num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator,
mesh_shape=(dim_state,),
domain_extents=(domain_extent,),
domain_is_periodic=True,
observation_node_indices=observation_space_indices,
)
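# --- illustrative sketch (not part of the original module) ----------------
# Stand-alone recomputation of the state-noise kernel built in __init__
# above; all names here are local to the sketch.
def example_state_noise_kernel(dim_state=512, domain_extent=32 * np.pi,
                               amplitude=1.0, length_scale=1.0,
                               time_step=0.25):
    spatial_freqs = np.arange(dim_state // 2 + 1) * 2 * np.pi / domain_extent
    return (
        time_step ** 0.5
        * amplitude
        * np.exp(-0.5 * spatial_freqs ** 2 * length_scale ** 2)
        * (dim_state / domain_extent) ** 0.5
    )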
| 47.072202
| 88
| 0.666692
| 1,616
| 13,039
| 5.206064
| 0.209158
| 0.029716
| 0.032806
| 0.019969
| 0.616427
| 0.594556
| 0.539166
| 0.530132
| 0.530132
| 0.521574
| 0
| 0.02024
| 0.27249
| 13,039
| 276
| 89
| 47.242754
| 0.864326
| 0.515914
| 0
| 0.367188
| 0
| 0
| 0.009774
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 1
| 0.046875
| false
| 0
| 0.046875
| 0.023438
| 0.148438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c3bcf54b28a72322eb20b3cefe8c6d28943d5e4
| 1,030
|
py
|
Python
|
demos/restful-users/index.py
|
karldoenitz/karlooper
|
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
|
[
"MIT"
] | 161
|
2016-05-17T12:44:07.000Z
|
2020-07-30T02:18:34.000Z
|
demos/restful-users/index.py
|
karldoenitz/karlooper
|
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
|
[
"MIT"
] | 6
|
2016-08-29T01:40:26.000Z
|
2017-12-29T09:20:41.000Z
|
demos/restful-users/index.py
|
karldoenitz/karlooper
|
2e1df83ed1ec9b343cdd930162a4de7ecd149c04
|
[
"MIT"
] | 16
|
2016-06-27T02:56:54.000Z
|
2019-08-08T08:18:48.000Z
|
# -*-encoding:utf-8-*-
import os
from karlooper.web.application import Application
from karlooper.web.request import Request
class UsersHandler(Request):
def get(self):
return self.render("/user-page.html")
class UserInfoHandler(Request):
def post(self):
print(self.get_http_request_message())
size = self.get_parameter("user_size", 0)
size = int(size)
user_list = [{"name": "name_%d" % i, "gender": "male", "age": i + 10} for i in range(size)]
result = {
"status": 0,
"message": "OK",
"data": user_list
}
return self.response_as_json(result)
url_mapping = {
"/users": UsersHandler,
"/user-info": UserInfoHandler
}
settings = {
"template": os.getcwd() + "/templates",
"static": os.getcwd() + "/templates",
"log_enable": False,
"debug": True
}
if __name__ == '__main__':
application = Application(url_mapping, settings=settings)
application.listen(port=8080)
application.run()
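# --- illustrative sketch (not part of the original demo) ------------------
# With the application listening on port 8080, the JSON endpoint above can
# be exercised with a request like (hypothetical client call, assuming
# get_parameter reads form-encoded fields):
#
#   curl -X POST 'http://127.0.0.1:8080/user-info' -d 'user_size=2'
#
# which should return {"status": 0, "message": "OK", "data": [...]} with
# two generated users.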
| 23.409091
| 99
| 0.61165
| 117
| 1,030
| 5.205128
| 0.57265
| 0.042693
| 0.052545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01148
| 0.238835
| 1,030
| 43
| 100
| 23.953488
| 0.765306
| 0.019417
| 0
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0.03125
| 0.28125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c3c84ef8550fb8c1fe9332f31bf0fbd72087616
| 1,206
|
py
|
Python
|
cli/waiter/subcommands/kill.py
|
geofft/waiter
|
0e10cd497c2c679ea43231866d9f803c3fed5d77
|
[
"Apache-2.0"
] | null | null | null |
cli/waiter/subcommands/kill.py
|
geofft/waiter
|
0e10cd497c2c679ea43231866d9f803c3fed5d77
|
[
"Apache-2.0"
] | null | null | null |
cli/waiter/subcommands/kill.py
|
geofft/waiter
|
0e10cd497c2c679ea43231866d9f803c3fed5d77
|
[
"Apache-2.0"
] | null | null | null |
from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive
def kill(clusters, args, _, __):
"""Kills the service(s) using the given token name."""
guard_no_cluster(clusters)
token_name_or_service_id = args.get('token-or-service-id')
is_service_id = args.get('is-service-id', False)
force_flag = args.get('force', False)
timeout_secs = args['timeout']
success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)
return 0 if success else 1
def register(add_parser):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('kill', help='kill services')
parser.add_argument('token-or-service-id')
parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')
parser.add_argument('--service-id', '-s', help='kill by service id instead of token',
dest='is-service-id', action='store_true')
parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete',
type=check_positive, default=30)
return kill
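# --- illustrative sketch (not part of the original module) ----------------
# register() expects an argparse-style sub-parser factory; a minimal
# hypothetical wiring looks like:
#
#   import argparse
#   parser = argparse.ArgumentParser(prog='waiter')
#   subparsers = parser.add_subparsers()
#   action = register(lambda name, **kwargs: subparsers.add_parser(name, **kwargs))
#   args = vars(parser.parse_args(['kill', 'my-token', '--timeout', '60']))
#   # action(clusters, args, None, None) would then run the kill flow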
| 46.384615
| 115
| 0.694859
| 172
| 1,206
| 4.656977
| 0.412791
| 0.11236
| 0.054931
| 0.047441
| 0.191011
| 0.191011
| 0
| 0
| 0
| 0
| 0
| 0.004024
| 0.175788
| 1,206
| 25
| 116
| 48.24
| 0.801811
| 0.09204
| 0
| 0
| 0
| 0
| 0.23893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c3ccdaafeb79fdce0197fde1a5c4f83054573ab
| 3,338
|
py
|
Python
|
a2t/src/a2t.py
|
syeda-khurrath/fabric8-analytics-common
|
421f7e27869c5695ed73b51e6422e097aba00108
|
[
"Apache-2.0"
] | null | null | null |
a2t/src/a2t.py
|
syeda-khurrath/fabric8-analytics-common
|
421f7e27869c5695ed73b51e6422e097aba00108
|
[
"Apache-2.0"
] | 4
|
2019-05-20T08:27:47.000Z
|
2019-05-20T08:29:57.000Z
|
a2t/src/a2t.py
|
codeready-analytics/fabric8-analytics-common
|
a763c5534d601f2f40a0f02c02914c49ea23669d
|
[
"Apache-2.0"
] | 1
|
2020-10-05T21:12:44.000Z
|
2020-10-05T21:12:44.000Z
|
"""The main module of the Analytics API Load Tests tool.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from component_analysis import ComponentAnalysis
from stack_analysis import StackAnalysis
from test_runner import start_tests
# current version of this tool
VERSION_MAJOR = 1
VERSION_MINOR = 0
def check_api_endpoint(api):
"""Check that some API endpoint is callable."""
log.info("Checking: core API endpoint")
with log.indent():
if not api.is_api_running():
log.error("Fatal: tested system is not available")
sys.exit(1)
else:
log.success("ok")
def check_auth_token(api):
"""Check the authorization token for the core API."""
log.info("Checking: authorization token for the core API")
with log.indent():
if api.check_auth_token_validity():
log.success("ok")
else:
log.error("Fatal: wrong token(?)")
sys.exit(1)
def check_system(api):
"""Check if all system endpoints are available and that tokens are valid."""
# try to access system endpoints
log.info("System check")
with log.indent():
check_api_endpoint(api)
check_auth_token(api)
def show_version():
"""Show A2T version."""
print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Analytics API Load Tests."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
component_analysis = ComponentAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
stack_analysis = StackAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
check_system(component_analysis)
try:
tests = read_csv_as_dicts(cfg["input_file"])
except Exception as e:
log.error("Test description can not be read")
log.error(e)
sys.exit(0)
t1 = time()
tags = cfg["tags"]
start_tests(cfg, tests, tags, component_analysis, stack_analysis)
t2 = time()
log.info("Start time: {}".format(t1))
log.info("End time: {}".format(t2))
log.info("Duration: {}".format(t2 - t1))
if __name__ == "__main__":
# execute only if run as a script
main()
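# --- illustrative sketch (not part of the original tool) ------------------
# Assuming cli_parser defines a --version flag (as the cli_arguments.version
# check in main() implies), the no-service smoke test of this script is:
#
#   python a2t.py --version
#
# which should print "A2T version 1.0" and exit with status 0.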
| 30.345455
| 90
| 0.65698
| 459
| 3,338
| 4.640523
| 0.381264
| 0.023005
| 0.01831
| 0.026761
| 0.148357
| 0.09108
| 0.035681
| 0.035681
| 0.035681
| 0
| 0
| 0.007984
| 0.249551
| 3,338
| 109
| 91
| 30.623853
| 0.842315
| 0.304374
| 0
| 0.21875
| 0
| 0
| 0.143357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078125
| false
| 0
| 0.15625
| 0
| 0.234375
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c3d2c0aac2c057e54b3e25d8827904204518172
| 3,568
|
py
|
Python
|
riscv_ctg/ctg.py
|
Giri2801/riscv-ctg
|
a90e03f0856bbdd106c3f6d51815af94707e711e
|
[
"BSD-3-Clause"
] | null | null | null |
riscv_ctg/ctg.py
|
Giri2801/riscv-ctg
|
a90e03f0856bbdd106c3f6d51815af94707e711e
|
[
"BSD-3-Clause"
] | null | null | null |
riscv_ctg/ctg.py
|
Giri2801/riscv-ctg
|
a90e03f0856bbdd106c3f6d51815af94707e711e
|
[
"BSD-3-Clause"
] | null | null | null |
# See LICENSE.incore file for details
import os,re
import multiprocessing as mp
import time
import shutil
from riscv_ctg.log import logger
import riscv_ctg.utils as utils
import riscv_ctg.constants as const
from riscv_isac.cgf_normalize import expand_cgf
from riscv_ctg.generator import Generator
from math import *
from riscv_ctg.__init__ import __version__
def create_test(usage_str, node,label,base_isa,max_inst):
global op_template
global randomize
global out_dir
global xlen
flen = 0
if 'opcode' not in node:
return
if 'ignore' in node:
logger.info("Ignoring :" + str(label))
if node['ignore']:
return
for opcode in node['opcode']:
op_node=None
if opcode not in op_template:
for op,foo in op_template.items():
if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']:
op_node = foo
break
else:
op_node = op_template[opcode]
if op_node is None:
logger.warning("Skipping :" + str(opcode))
return
if xlen not in op_node['xlen']:
logger.warning("Skipping {0} since its not supported in current XLEN:".format(opcode))
return
if 'flen' in op_node:
if '.d' in opcode:
flen = 64
elif '.s' in opcode:
flen = 32
else:
flen = op_node['flen'][0]
#if flen not in op_node['flen']:
# return
fprefix = os.path.join(out_dir,str(label))
logger.info('Generating Test for :' + str(label) +"-" + opcode)
formattype = op_node['formattype']
gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa)
op_comb = gen.opcomb(node)
val_comb = gen.valcomb(node)
instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node))))
logger.info("Writing tests for :"+str(label))
my_dict = gen.reformat_instr(instr_dict)
gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst)
def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate):
global op_template
global randomize
global out_dir
global xlen
logger.level(verbose)
logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ ))
logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')
logger.info('All Rights Reserved.')
logger.info("Copying env folder to Output directory.")
env_dir = os.path.join(out,"env")
if not os.path.exists(env_dir):
shutil.copytree(const.env,env_dir)
xlen = int(xlen_arg)
out_dir = out
randomize = random
mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT'
cgf_argument = ''
for cf in cgf_file:
cgf_argument += '// --cgf {} \\\n'.format(cf)
randomize_argument = ''
if random is True:
randomize_argument = ' \\\n// --randomize'
usage_str = const.usage.safe_substitute(base_isa=base_isa, \
cgf=cgf_argument, version = __version__, time=mytime, \
randomize=randomize_argument,xlen=str(xlen_arg))
op_template = utils.load_yaml(const.template_file)
cgf = expand_cgf(cgf_file,xlen,list_duplicate)
pool = mp.Pool(num_procs)
results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()])
pool.close()
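# --- illustrative sketch (not part of the original module) ----------------
# ctg() fans create_test() out over a multiprocessing pool.  A hypothetical
# serial fallback, useful when debugging a single coverpoint label:
#
#   for label, node in cgf.items():
#       create_test(usage_str, node, label, base_isa, max_inst)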
| 37.166667
| 114
| 0.626962
| 483
| 3,568
| 4.428571
| 0.300207
| 0.030856
| 0.01683
| 0.019635
| 0.058906
| 0.038336
| 0.038336
| 0.038336
| 0.038336
| 0.038336
| 0
| 0.00457
| 0.264013
| 3,568
| 95
| 115
| 37.557895
| 0.809977
| 0.0213
| 0
| 0.139535
| 0
| 0
| 0.120734
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.127907
| 0
| 0.197674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|