hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d5f72b6bb8de932265e3494ed6520e23b33d2b72
| 705
|
py
|
Python
|
p6e8.py
|
yannickbf-prog/python
|
da4bd2c8668966359b829a8ac2a896afeca2b150
|
[
"MIT"
] | null | null | null |
p6e8.py
|
yannickbf-prog/python
|
da4bd2c8668966359b829a8ac2a896afeca2b150
|
[
"MIT"
] | null | null | null |
p6e8.py
|
yannickbf-prog/python
|
da4bd2c8668966359b829a8ac2a896afeca2b150
|
[
"MIT"
] | null | null | null |
#Yannick p6e8 Escribe un programa que te pida primero un número y luego te pida números hasta que la suma de los números introducidos coincida con el número inicial. El programa termina escribiendo la lista de números.
limite = int(input("Escribe limite:"))
valores = int(input("Escribe un valor:"))
listavalores = []
listavalores.append(valores)
while limite > sum(listavalores):
valores = int(input("Escribe otro valor"))
listavalores.append(valores)
print(f"El limite a superar es {limite}. La lista creada es ", end="")
for i in range(len(listavalores)):
print (listavalores[i], end=" ")
print(f"ya que la suma de estos numeros es {sum(listavalores)}")
| 30.652174
| 219
| 0.704965
| 100
| 705
| 4.97
| 0.51
| 0.04829
| 0.090543
| 0.044266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003521
| 0.194326
| 705
| 22
| 220
| 32.045455
| 0.871479
| 0.307801
| 0
| 0.181818
| 0
| 0
| 0.340564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f73b66aea43800edd9e2977d37ade872174872
| 1,574
|
py
|
Python
|
.venv/lib/python3.8/site-packages/cleo/application.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | 1
|
2020-08-07T16:09:57.000Z
|
2020-08-07T16:09:57.000Z
|
.venv/lib/python3.8/site-packages/cleo/application.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | null | null | null |
.venv/lib/python3.8/site-packages/cleo/application.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | null | null | null |
from typing import Optional
from typing import Tuple
from clikit.console_application import ConsoleApplication
from .commands import BaseCommand
from .commands.completions_command import CompletionsCommand
from .config import ApplicationConfig
class Application(ConsoleApplication, object):
"""
An Application is the container for a collection of commands.
This class is optimized for a standard CLI environment.
Usage:
>>> app = Application('myapp', '1.0 (stable)')
>>> app.add(HelpCommand())
>>> app.run()
"""
def __init__(
self, name=None, version=None, complete=True, config=None
): # type: (str, str, bool, Optional[ApplicationConfig]) -> None
if config is None:
config = ApplicationConfig(name, version)
super(Application, self).__init__(config)
if complete:
self.add(CompletionsCommand())
def add_commands(self, *commands): # type: (Tuple[BaseCommand]) -> None
for command in commands:
self.add(command)
def add(self, command): # type: (BaseCommand) -> Application
"""
Adds a command object.
"""
self.add_command(command.config)
command.set_application(self)
return self
def find(self, name): # type: (str) -> BaseCommand
names = name.split(" ")
command = self.get_command(names[0])
for name in names[1:]:
command = command.get_sub_command(name)
return command.config.handler
| 29.148148
| 77
| 0.623888
| 168
| 1,574
| 5.75
| 0.375
| 0.021739
| 0.033126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003509
| 0.275731
| 1,574
| 53
| 78
| 29.698113
| 0.84386
| 0.250953
| 0
| 0
| 0
| 0
| 0.00094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.214286
| 0
| 0.464286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f7c879e6735f223e0344e0abf1f6975431be03
| 1,009
|
py
|
Python
|
watcher/fly.py
|
cog-isa/htm-rl
|
baf5b67a11283d37165bf6a29d6808a234d6d98c
|
[
"MIT"
] | 1
|
2021-12-09T22:09:24.000Z
|
2021-12-09T22:09:24.000Z
|
watcher/fly.py
|
cog-isa/htm-rl
|
baf5b67a11283d37165bf6a29d6808a234d6d98c
|
[
"MIT"
] | null | null | null |
watcher/fly.py
|
cog-isa/htm-rl
|
baf5b67a11283d37165bf6a29d6808a234d6d98c
|
[
"MIT"
] | 1
|
2021-11-18T08:54:20.000Z
|
2021-11-18T08:54:20.000Z
|
from utils.drawer import Drawer
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("name", help="the name of the datafile")
parser.add_argument("--size", help="width,height")
args = parser.parse_args()
if args.size is None:
width, height = 1280, 720
else:
width, height = args.size.split(',')
drawer = Drawer('data/'+args.name, [int(width), int(height)])
while not drawer.window.should_close():
drawer.update()
# the main application loop
while not drawer.window.should_close() and not drawer.window.next and not drawer.window.previous:
drawer.process()
if drawer.window.next and drawer.current + 2 < len(drawer.data_base.keys()): drawer.current = drawer.current + 1
if drawer.window.previous and drawer.current > 0: drawer.current = drawer.current - 1
drawer.window.next = False
drawer.window.previous = False
drawer.window.terminate()
| 36.035714
| 120
| 0.663033
| 130
| 1,009
| 5.038462
| 0.384615
| 0.164886
| 0.091603
| 0.061069
| 0.177099
| 0.094656
| 0
| 0
| 0
| 0
| 0
| 0.013906
| 0.216056
| 1,009
| 27
| 121
| 37.37037
| 0.814159
| 0.024777
| 0
| 0
| 0
| 0
| 0.061162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f85a460ddcb48e089b11f2309816efd46bb61e
| 3,263
|
py
|
Python
|
test/unit/test_structures.py
|
ourobouros/aws-encryption-sdk-python
|
1d0e40de7fef1b1131127a6f8626ef6a60739289
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_structures.py
|
ourobouros/aws-encryption-sdk-python
|
1d0e40de7fef1b1131127a6f8626ef6a60739289
|
[
"Apache-2.0"
] | 1
|
2019-05-30T22:14:47.000Z
|
2019-05-30T22:14:47.000Z
|
test/unit/test_structures.py
|
ourobouros/aws-encryption-sdk-python
|
1d0e40de7fef1b1131127a6f8626ef6a60739289
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for aws_encryption_sdk.structures"""
import pytest
from aws_encryption_sdk.identifiers import Algorithm, ContentType, ObjectType, SerializationVersion
from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey
from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs
pytestmark = [pytest.mark.unit, pytest.mark.local]
VALID_KWARGS = {
MessageHeader: [
dict(
version=SerializationVersion.V1,
type=ObjectType.CUSTOMER_AE_DATA,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,
message_id=b"aosiejfoaiwej",
encryption_context={},
encrypted_data_keys=set([]),
content_type=ContentType.FRAMED_DATA,
content_aad_length=32456,
header_iv_length=32456,
frame_length=234567,
)
],
MasterKeyInfo: [
dict(provider_id="fawnofijawef", key_info="ajsnoiajerofi"),
dict(provider_id=b"fawnofijawef", key_info="ajsnoiajerofi"),
dict(provider_id="fawnofijawef", key_info=b"ajsnoiajerofi"),
dict(provider_id=b"fawnofijawef", key_info=b"ajsnoiajerofi"),
],
RawDataKey: [
dict(key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"aosjfoaiwej"), data_key=b"aosijfoewaijf")
],
DataKey: [
dict(
key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"aosjfoaiwej"),
data_key=b"oaijefoawiejf",
encrypted_data_key=b"aisofiawjef",
)
],
EncryptedDataKey: [
dict(
key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"aosjfoaiwej"), encrypted_data_key=b"aisofiawjef"
)
],
}
@pytest.mark.parametrize("cls, kwargs", all_valid_kwargs(VALID_KWARGS))
def test_attributes_valid_kwargs(cls, kwargs):
cls(**kwargs)
@pytest.mark.parametrize("cls, kwargs", all_invalid_kwargs(VALID_KWARGS))
def test_attributes_invalid_kwargs(cls, kwargs):
with pytest.raises(TypeError):
cls(**kwargs)
@pytest.mark.parametrize(
"kwargs, attribute, expected_value",
(
(dict(provider_id="asfoijwae", key_info=b"oaiejfoeiwja"), "provider_id", "asfoijwae"),
(dict(provider_id=b"asfoijwae", key_info=b"oaiejfoeiwja"), "provider_id", "asfoijwae"),
(dict(provider_id="asfoijwae", key_info="oaiejfoeiwja"), "key_info", b"oaiejfoeiwja"),
(dict(provider_id="asfoijwae", key_info=b"oaiejfoeiwja"), "key_info", b"oaiejfoeiwja"),
),
)
def test_master_key_info_convert(kwargs, attribute, expected_value):
test = MasterKeyInfo(**kwargs)
assert getattr(test, attribute) == expected_value
| 37.505747
| 120
| 0.70426
| 388
| 3,263
| 5.695876
| 0.368557
| 0.044344
| 0.036199
| 0.045249
| 0.423077
| 0.320814
| 0.238009
| 0.217195
| 0.148869
| 0.148869
| 0
| 0.014296
| 0.185412
| 3,263
| 86
| 121
| 37.94186
| 0.817156
| 0.179896
| 0
| 0.163934
| 0
| 0
| 0.162091
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 1
| 0.04918
| false
| 0
| 0.065574
| 0
| 0.114754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f884d302908ab9fba8e534f212148aba1c42a3
| 1,745
|
py
|
Python
|
codes/utils/mygraph.py
|
CristianLazoQuispe/Datathon-Interbank-2020
|
54f5d11fe83eb5a8ea8284be13d96e9e12978354
|
[
"MIT"
] | null | null | null |
codes/utils/mygraph.py
|
CristianLazoQuispe/Datathon-Interbank-2020
|
54f5d11fe83eb5a8ea8284be13d96e9e12978354
|
[
"MIT"
] | null | null | null |
codes/utils/mygraph.py
|
CristianLazoQuispe/Datathon-Interbank-2020
|
54f5d11fe83eb5a8ea8284be13d96e9e12978354
|
[
"MIT"
] | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
path_results = '../results/images/'
# this function receives a dataset with binary target and it will graph a hist of values
def graph_target(data,name="target",figsize=(6,4),title_name=None,color_text="white",save=False,name_file='target_distribution'):
plt.figure(figsize=figsize)
total = float(len(data)) # one person per row
title_name = "Target distribution"+" of "+str(int(total))+" users" if title_name is None else title_name+" of "+str(int(total))+" users"
ax = sns.countplot(x=name, data=data) # for Seaborn version 0.7 and more
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height/3,
'{:.2f}%\n{:d}'.format(100*height/total,height),
ha="center",color=color_text,fontweight='bold')#fontsize=10
plt.title(title_name)
plt.show()
if save:
figure = ax.get_figure()
figure.savefig(path_results+name_file+'.png',dpi=400, bbox_inches = 'tight')
# plot histograms of train and test to understand the differences between them
def plot_comp_hist(data1,data2,l_range=[-np.inf,np.inf],labels=['x','y'],title='histogram',bins=20,alpha=0.5):
x = data1[(data1>=l_range[0])&(data1<l_range[1])]
y = data2[(data2>=l_range[0])&(data2<l_range[1])]
plt.hist([x, y],label=labels, bins = bins, alpha=alpha)
plt.legend(loc='upper right')
plt.title(title)
#rcc_train[(rcc_train.saldo>=0.2)&(rcc_train.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5)
#rcc_train[(rcc_test.saldo>=0.2)&(rcc_test.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5)
| 51.323529
| 140
| 0.671633
| 281
| 1,745
| 4.060498
| 0.448399
| 0.039439
| 0.028922
| 0.022787
| 0.117441
| 0.08589
| 0.08589
| 0.08589
| 0.08589
| 0.08589
| 0
| 0.029472
| 0.163897
| 1,745
| 34
| 141
| 51.323529
| 0.75257
| 0.256734
| 0
| 0
| 0
| 0
| 0.109642
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.148148
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f8c3fa603dfdb79ab13ebb13d4e8e23422a12c
| 1,134
|
py
|
Python
|
src/pretix/base/validators.py
|
td00/pretix
|
e31bd7600c85598de135f2eb5012e2f33fdb1d11
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/base/validators.py
|
td00/pretix
|
e31bd7600c85598de135f2eb5012e2f33fdb1d11
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/base/validators.py
|
td00/pretix
|
e31bd7600c85598de135f2eb5012e2f33fdb1d11
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-08-09T17:11:28.000Z
|
2017-08-09T17:11:28.000Z
|
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _
class BlacklistValidator:
blacklist = []
def __call__(self, value):
# Validation logic
if value in self.blacklist:
raise ValidationError(
_('This slug has an invalid value: %(value)s.'),
code='invalid',
params={'value': value},
)
@deconstructible
class EventSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'api',
'events',
]
@deconstructible
class OrganizerSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'pretixdroid',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'about',
'api',
]
| 20.618182
| 64
| 0.552028
| 82
| 1,134
| 7.426829
| 0.585366
| 0.049261
| 0.049261
| 0.151067
| 0.298851
| 0.19376
| 0
| 0
| 0
| 0
| 0
| 0.00534
| 0.339506
| 1,134
| 54
| 65
| 21
| 0.807744
| 0.014109
| 0
| 0.55814
| 0
| 0
| 0.197133
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.069767
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f8e7bbb353d3c7f7fae4eb9baaff7822b54512
| 32,192
|
py
|
Python
|
fortnitepy/ext/commands/bot.py
|
gfdb/fortnitepy
|
1cedbddee1f81c96fc60b586cd2c16398bc2d45f
|
[
"MIT"
] | 127
|
2019-07-15T15:55:30.000Z
|
2022-03-22T07:39:29.000Z
|
fortnitepy/ext/commands/bot.py
|
xMistt/fortnitepy
|
c64d72572e188a938e0b39a6d1fd1e8ee4842d31
|
[
"MIT"
] | 65
|
2019-07-15T22:48:35.000Z
|
2022-01-30T05:18:36.000Z
|
fortnitepy/ext/commands/bot.py
|
xMistt/fortnitepy
|
c64d72572e188a938e0b39a6d1fd1e8ee4842d31
|
[
"MIT"
] | 83
|
2019-07-18T12:37:58.000Z
|
2022-03-19T20:56:47.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import inspect
import asyncio
import types
import sys
import importlib
import collections
import traceback
from typing import Any, List, Optional, Mapping, Set
from fortnitepy.client import Client
from fortnitepy.auth import Auth
from fortnitepy.typedefs import MaybeCoro, ListOrTuple
from ._types import _BaseCommand
from .errors import (ExtensionFailed, ExtensionMissingEntryPoint,
ExtensionNotLoaded, ExtensionAlreadyLoaded,
ExtensionNotFound, CheckFailure, CommandError,
CommandNotFound)
from .core import GroupMixin
from .cog import Cog
from .view import StringView
from .context import Context
from .help import HelpCommand, FortniteHelpCommand
from .typedefs import Message
log = logging.getLogger(__name__)
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self) -> str:
return '<default-help-command>'
_default = _DefaultRepr()
class Bot(GroupMixin, Client):
"""Represents a fortnite bot.
This class is a subclass of :class:`fortnitepy.Client` and as a result
anything that you can do with a :class:`fortnitepy.Client` you can do with
this bot.
This class also subclasses :class:`.GroupMixin` to provide the
functionality to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`fortnitepy.FriendMessage` or
:class:`fortnitepy.PartyMessage` as its second parameter and returns
the prefix. This is to facilitate "dynamic" command prefixes. This
callable can be either a regular function or a coroutine.
An empty string as the prefix always matches, enabling prefix-less
command invocation.
The command prefix could also be an iterable of strings indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`.Context.prefix`. To avoid confusion empty iterables are not
allowed.
.. note::
When passing multiple prefixes be careful to not pass a prefix
that matches a longer prefix occurring later in the sequence. For
example, if the command prefix is ``('!', '!?')`` the ``'!?'``
prefix will never be matched to any message as the previous one
matches messages starting with ``!?``. This is especially important
when passing an empty string, it should always be last as no prefix
after it will be matched.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``.
This attribute does not carry over to groups. You must set it to every
group if you require group commands to be case insensitive as well.
description: :class:`str`
The content prefixed into the default help message.
help_command: Optional[:class:`.HelpCommand`]
The help command implementation to use. This can be dynamically
set at runtime. To remove the help command pass ``None``. For more
information on implementing a help command, see
:ref:`ext_commands_help_command`.
owner_id: Optional[:class:`int`]
The user ID that owns the bot. This is used by :meth:`.is_owner()`
and checks that call this method.
owner_ids: Optional[Collection[:class:`int`]]
The user IDs that owns the bot. This is similar to `owner_id`.
For performance reasons it is recommended to use a :class:`set`
for the collection. You cannot set both `owner_id` and `owner_ids`.
This is used by :meth:`.is_owner()` and checks that call this method.
"""
def __init__(self, command_prefix: Any, auth: Auth, *,
help_command: Optional[HelpCommand] = _default,
description: Optional[str] = None,
**kwargs: Any) -> None:
kwargs['case_insensitive'] = kwargs.get('case_insensitive', False)
super().__init__(auth, **kwargs)
self.command_prefix = command_prefix
self.description = inspect.cleandoc(description) if description else ''
self.owner_id = kwargs.get('owner_id')
self.owner_ids = kwargs.get('owner_ids', set())
if self.owner_id and self.owner_ids:
raise TypeError('Both owner_id and owner_ids are set.')
if (self.owner_ids and not isinstance(self.owner_ids,
collections.abc.Collection)):
raise TypeError(
'owner_ids must be a collection not '
'{0.__class__!r}'.format(self.owner_ids)
)
self.__cogs = {}
self.__extensions = {}
self._checks = []
self._check_once = []
self._help_command = None
self._before_invoke = None
self._after_invoke = None
if help_command is _default:
self.help_command = FortniteHelpCommand()
else:
self.help_command = help_command
self.add_event_handler('friend_message', self.process_commands)
self.add_event_handler('party_message', self.process_commands)
def register_methods(self) -> None:
for _, obj in inspect.getmembers(self):
if isinstance(obj, _BaseCommand):
obj.instance = self
if obj.parent is None:
try:
self.add_command(obj)
except CommandError:
traceback.print_exc()
continue
super().register_methods()
async def close(self, *,
close_http: bool = True,
dispatch_close: bool = True) -> None:
if dispatch_close:
await asyncio.gather(
self.dispatch_and_wait_event('before_close'),
self.dispatch_and_wait_event('close'),
)
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await self._close(
close_http=close_http,
dispatch_close=dispatch_close
)
def check(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a check globally to every command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check
def global_check(ctx):
# Allows only party commands.
return ctx.party is not None
"""
self.add_check(func)
return func
def add_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
Parameters
----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`Command.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Removes a global check from the bot.
Parameters
----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
list_ = self._check_once if call_once else self._checks
try:
list_.remove(func)
except ValueError:
pass
def check_once(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`Command.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *,
call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
for func in data:
if asyncio.iscoroutinefunction(func):
res = await func(ctx)
else:
res = func(ctx)
if not res:
return False
return True
async def is_owner(self, user_id: str) -> bool:
"""|coro|
Checks if a user id is the owner of the bot.
Parameters
----------
user_id: :class:`str`
The user id to check for.
Returns
-------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user_id == self.owner_id
else:
return user_id in self.owner_ids
def before_invoke(self, coro: MaybeCoro) -> MaybeCoro:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke`
hooks are only called if all checks and argument parsing
procedures pass without error. If any check or argument parsing
procedures fail then the hooks are not called.
Parameters
----------
coro
The coroutine to register as the pre-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro: MaybeCoro) -> MaybeCoro:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
----------
coro:
The coroutine to register as the post-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
def add_cog(self, cog: Cog) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
Parameters
----------
cog: :class:`.Cog`
The cog to register to the bot.
Raises
------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
"""
if not isinstance(cog, Cog):
raise TypeError('Cogs must derive from Cog.')
cog = cog._inject(self)
self.__cogs[cog.__cog_name__] = cog
def remove_cog(self, name: str) -> None:
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
Parameters
----------
name: :class:`str`
The name of the cog to remove.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = self.help_command
if help_command and help_command.cog is cog:
help_command.cog = None
cog._eject(self)
def get_cog(self, name: str) -> Optional[Cog]:
    """Gets the cog instance requested.

    If the cog is not found, ``None`` is returned instead.

    Parameters
    -----------
    name: :class:`str`
        The name of the cog you are requesting.
        This is equivalent to the name passed via keyword
        argument in class creation or the class name if unspecified.

    Returns
    -------
    Optional[:class:`.Cog`]
        The requested cog, or ``None`` if not found.
    """
    return self.__cogs.get(name)
@property
def cogs(self) -> Mapping[str, Cog]:
    """Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog
    name to cog.
    """
    # MappingProxyType stops callers from mutating the registry directly.
    return types.MappingProxyType(self.__cogs)
def _remove_module_references(self, name: str) -> None:
    """Remove every cog, command and event listener that the module
    *name* (a dotted module path) contributed to this bot.

    Used as part of extension unloading.
    """
    # Remove the cogs registered from the module.  Iterate over a copy
    # because remove_cog() mutates the underlying dict.
    for cogname, cog in self.__cogs.copy().items():
        if _is_submodule(name, cog.__module__):
            self.remove_cog(cogname)

    # Remove all the commands from the module.
    for cmd in self.all_commands.copy().values():
        if cmd.module is not None and _is_submodule(name, cmd.module):
            if isinstance(cmd, GroupMixin):
                # Groups own sub-commands that must be dropped as well.
                cmd.recursively_remove_all_commands()
            self.remove_command(cmd.name)

    # Remove all the listeners from the module.
    for event_list in self._events.copy().values():
        remove = []
        for index, event in enumerate(event_list):
            if (event.__module__ is not None
                    and _is_submodule(name, event.__module__)):
                remove.append(index)

        # Delete from the back so earlier indices stay valid.
        for index in reversed(remove):
            del event_list[index]
def _call_module_finalizers(self, lib: object, key: str) -> None:
    """Run the extension module's optional ``cog_teardown`` hook, then
    purge the extension (and all of its submodules) from
    ``sys.modules`` and the loaded-extensions mapping.
    """
    try:
        func = getattr(lib, 'cog_teardown')
    except AttributeError:
        # The teardown hook is optional.
        pass
    else:
        try:
            func(self)
        except Exception:
            # Teardown is deliberately best-effort: a failing hook must
            # not prevent the extension from being unloaded.
            pass
    finally:
        self.__extensions.pop(key, None)
        sys.modules.pop(key, None)

        # Drop any submodules the extension pulled in as well.
        name = lib.__name__
        for module in list(sys.modules.keys()):
            if _is_submodule(name, module):
                del sys.modules[module]
def _load_from_module_spec(self, spec: 'importlib.machinery.ModuleSpec',
                           key: str) -> None:
    """Import *spec* under the name *key*, run its ``extension_setup``
    entry point, and record it as a loaded extension.

    Precondition: ``key`` is not already in the extensions mapping.

    Raises :exc:`ExtensionFailed` if importing or setup fails and
    :exc:`ExtensionMissingEntryPoint` if ``extension_setup`` is absent.
    """
    lib = importlib.util.module_from_spec(spec)
    sys.modules[key] = lib

    try:
        spec.loader.exec_module(lib)
    except Exception as e:
        # The module body itself raised; undo the sys.modules entry.
        del sys.modules[key]
        raise ExtensionFailed(key, e) from e

    try:
        setup = getattr(lib, 'extension_setup')
    except AttributeError:
        del sys.modules[key]
        raise ExtensionMissingEntryPoint(key)

    try:
        setup(self)
    except Exception as e:
        # Roll back whatever the partially-run setup registered.
        del sys.modules[key]
        self._remove_module_references(lib.__name__)
        self._call_module_finalizers(lib, key)
        raise ExtensionFailed(key, e) from e
    else:
        self.__extensions[key] = lib
def load_extension(self, name: str) -> None:
    """Loads an extension.

    An extension is a python module that contains commands, cogs, or
    listeners.

    An extension must have a global function, ``extension_setup`` defined
    as the entry point on what to do when the extension is loaded. This
    entry point must have a single argument, the ``bot``.

    Parameters
    ----------
    name: :class:`str`
        The extension name to load. It must be dot separated like
        regular Python imports if accessing a sub-module. e.g.
        ``foo.test`` if you want to import ``foo/test.py``.

    Raises
    ------
    ExtensionNotFound
        The extension could not be imported.
    ExtensionAlreadyLoaded
        The extension is already loaded.
    ExtensionMissingEntryPoint
        The extension does not have a extension_setup function.
    ExtensionFailed
        The extension or its setup function had an execution error.
    """
    if name in self.__extensions:
        raise ExtensionAlreadyLoaded(name)

    # Resolve the dotted path to a module spec without importing yet.
    spec = importlib.util.find_spec(name)
    if spec is None:
        raise ExtensionNotFound(name)

    self._load_from_module_spec(spec, name)
def unload_extension(self, name: str) -> None:
    """Unloads an extension.

    When the extension is unloaded, all commands, listeners, and cogs are
    removed from the bot and the module is un-imported.

    The extension can provide an optional global function,
    ``cog_teardown``, to do miscellaneous clean-up if necessary. This
    function takes a single parameter, the ``bot``, similar to
    ``extension_setup`` from :meth:`~.Bot.load_extension`.

    Parameters
    ------------
    name: :class:`str`
        The extension name to unload. It must be dot separated like
        regular Python imports if accessing a sub-module. e.g.
        ``foo.test`` if you want to import ``foo/test.py``.

    Raises
    -------
    ExtensionNotLoaded
        The extension was not loaded.
    """
    lib = self.__extensions.get(name)
    if lib is None:
        raise ExtensionNotLoaded(name)

    # First strip everything the module registered, then run its
    # teardown hook and purge it from sys.modules.
    self._remove_module_references(lib.__name__)
    self._call_module_finalizers(lib, name)
def reload_extension(self, name: str) -> None:
    """Atomically reloads an extension.

    This replaces the extension with the same extension, only refreshed.
    This is equivalent to a :meth:`unload_extension` followed by
    a :meth:`load_extension` except done in an atomic way. That is, if an
    operation fails mid-reload then the bot will roll-back to the prior
    working state.

    Parameters
    ------------
    name: :class:`str`
        The extension name to reload. It must be dot separated like
        regular Python imports if accessing a sub-module. e.g.
        ``foo.test`` if you want to import ``foo/test.py``.

    Raises
    -------
    ExtensionNotLoaded
        The extension was not loaded.
    ExtensionNotFound
        The extension could not be imported.
    ExtensionMissingEntryPoint
        The extension does not have a extension_setup function.
    ExtensionFailed
        The extension setup function had an execution error.
    """
    lib = self.__extensions.get(name)
    if lib is None:
        raise ExtensionNotLoaded(name)

    # get the previous module states from sys modules so they can be
    # restored if the reload fails
    modules = {
        name: module
        for name, module in sys.modules.items()
        if _is_submodule(lib.__name__, name)
    }

    try:
        # Unload and then load the module...
        self._remove_module_references(lib.__name__)
        self._call_module_finalizers(lib, name)
        self.load_extension(name)
    except Exception:
        # if the load failed, the remnants should have been
        # cleaned from the load_extension function call
        # so let's load it from our old compiled library.
        lib.extension_setup(self)
        self.__extensions[name] = lib

        # revert sys.modules back to normal and raise back to caller
        sys.modules.update(modules)
        raise
@property
def extensions(self) -> Mapping[str, types.ModuleType]:
    """Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only
    mapping of extension name to extension.
    """
    # Read-only view; use load/unload/reload_extension to mutate.
    return types.MappingProxyType(self.__extensions)
@property
def help_command(self) -> Optional[HelpCommand]:
    """Optional[:class:`HelpCommand`]: The currently installed help
    command, or ``None`` if it has been removed.
    """
    return self._help_command

@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
    # Setting a new value swaps the registered help command; setting
    # None removes it entirely.
    if value is not None:
        if not isinstance(value, HelpCommand):
            raise TypeError('help_command must be a subclass '
                            'of HelpCommand')
        if self._help_command is not None:
            # Detach the previous help command before replacing it.
            self._help_command._remove_from_bot(self)
        self._help_command = value
        value._add_to_bot(self)
    elif self._help_command is not None:
        self._help_command._remove_from_bot(self)
        self._help_command = None
    else:
        self._help_command = None
async def get_prefix(self, message: Message) -> Any:
    """|coro|

    Retrieves the prefix the bot is listening to with the message as
    a context.

    Parameters
    ----------
    message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
        The message context to get the prefix of.

    Returns
    --------
    Union[List[:class:`str`], :class:`str`]
        A list of prefixes or a single prefix that the bot is
        listening for.
    """  # noqa
    source = self.command_prefix
    ret = source

    # A callable prefix is resolved with (bot, message); it may be a
    # plain function or a coroutine function.
    if callable(source):
        if asyncio.iscoroutinefunction(source):
            ret = await source(self, message)
        else:
            ret = source(self, message)

    if isinstance(ret, str):
        return ret

    try:
        prefixes = list(ret)
    except TypeError:
        # It's possible that a generator raised this exception. Don't
        # replace it with our own error if that's the case.
        if isinstance(ret, collections.abc.Iterable):
            raise

        raise TypeError('command_prefix must be plain string, '
                        'iterable of strings, or callable '
                        'returning either of these, not '
                        '{}'.format(ret.__class__.__name__))

    if not prefixes:
        raise ValueError('Iterable command_prefix must contain at '
                         'least one prefix')
    return prefixes
async def get_context(self, message: Message, *,
                      cls: Context = Context) -> Context:
    r"""|coro|

    Returns the invocation context from the message.

    This is a more low-level counter-part for :meth:`.process_commands`
    to allow users more fine grained control over the processing.

    The returned context is not guaranteed to be a valid invocation
    context, :attr:`.Context.valid` must be checked to make sure it is.
    If the context is not valid then it is not a valid candidate to be
    invoked under :meth:`~.Bot.invoke`.

    Parameters
    ----------
    message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
        The message to get the invocation context from.
    cls
        The factory class that will be used to create the context.
        By default, this is :class:`.Context`. Should a custom
        class be provided, it must be similar enough to :class:`.Context`\'s
        interface.

    Returns
    -------
    :class:`.Context`
        The invocation context. The type of this can change via the
        ``cls`` parameter.
    """  # noqa
    view = StringView(message.content)
    ctx = cls(prefix=None, view=view, bot=self, message=message)

    prefix = await self.get_prefix(message)
    invoked_prefix = prefix

    if isinstance(prefix, str):
        if not view.skip_string(prefix):
            # Message does not start with the prefix; ctx stays invalid.
            return ctx
    else:
        try:
            if message.content.startswith(tuple(prefix)):
                # Figure out which of the candidate prefixes matched.
                for element in prefix:
                    if view.skip_string(element):
                        invoked_prefix = element
                        break
                else:
                    invoked_prefix = None
            else:
                return ctx
        except TypeError:
            if not isinstance(prefix, list):
                raise TypeError('get_prefix must return either a string '
                                'or a list of string, not '
                                '{}'.format(prefix.__class__.__name__))

            # It's a list; report the first non-string element if that
            # is what broke startswith()/skip_string() above.
            for value in prefix:
                if not isinstance(value, str):
                    raise TypeError('Iterable command_prefix or list '
                                    'returned from get_prefix must '
                                    'contain only strings, not '
                                    '{}'.format(value.__class__.__name__))

            # The TypeError came from elsewhere; propagate it untouched.
            raise

    invoker = view.get_word()
    ctx.invoked_with = invoker
    ctx.prefix = invoked_prefix
    ctx.command = self.all_commands.get(invoker)
    return ctx
def _print_error(self, ctx: Context, error: Exception) -> None:
    """Write a default "ignoring exception" notice for *ctx*'s command,
    followed by *error*'s traceback, to stderr."""
    header = 'Ignoring exception in command {}:'.format(ctx.command)
    print(header, file=sys.stderr)

    traceback.print_exception(
        type(error), error, error.__traceback__, file=sys.stderr
    )
async def wait_for_futures(self, futures: 'ListOrTuple', *,
                           check: Optional[Callable] = None,
                           timeout: Optional[float] = None,
                           cancel: bool = False) -> Optional[asyncio.Future]:
    """|coro|

    Wait for the given futures and return the first completed future
    accepted by ``check``.

    Parameters
    ----------
    futures:
        The futures (or tasks) to wait on. May be empty.
    check: Optional[Callable]
        Predicate called with each completed future; the first future
        for which it returns a truthy value is returned. ``None``
        accepts any completed future.
    timeout: Optional[:class:`float`]
        Seconds to wait per :func:`asyncio.wait` round, or ``None``
        to wait indefinitely.
    cancel: :class:`bool`
        Whether to cancel the remaining pending futures on return.

    Returns
    -------
    Optional[:class:`asyncio.Future`]
        The accepted future, or ``None`` when ``futures`` is empty,
        the timeout expired, or no completed future passed ``check``.
    """
    # Fix: the original annotated the return as None although it
    # returns a future, and an expired timeout left the loop spinning
    # forever because asyncio.wait() simply returned an empty done-set.
    def _cancel_futs(pending_futures: Set[asyncio.Future]) -> None:
        # Best-effort cancellation of everything still pending.
        for p in pending_futures:
            if not p.cancelled():
                p.cancel()

    pending = futures
    while pending:
        done, pending = await asyncio.wait(
            pending,
            return_when=asyncio.FIRST_COMPLETED,
            timeout=timeout
        )

        if not done:
            # Timed out with nothing completed; without this guard the
            # loop would re-wait forever, making ``timeout`` useless.
            if cancel:
                _cancel_futs(pending)
            return None

        for future in done:
            if check is None or check(future):
                if cancel:
                    _cancel_futs(pending)
                return future
async def _wait_for_error_return(self, futures: List[asyncio.Future],
                                 ctx: Context,
                                 error: Exception) -> None:
    """Wait on the ``command_error`` handler futures; if any handler
    returned ``False``, also emit the default stderr error output."""
    def check(future):
        # A handler returning False opts back in to default printing.
        return future.result() is False

    ret = await self.wait_for_futures(futures, check=check)
    if isinstance(ret, asyncio.Future):
        self._print_error(ctx, error)
def dispatch_error(self, ctx: Context, error: Exception) -> None:
    """Route *error* to registered ``command_error`` handlers, falling
    back to printing the traceback to stderr when none exist."""
    if self._event_has_handler('command_error'):
        futures = self.dispatch_event('command_error', ctx, error)

        # Fire-and-forget: handlers run concurrently; printing happens
        # later only if a handler asked for it by returning False.
        asyncio.ensure_future(self._wait_for_error_return(
            futures,
            ctx,
            error
        ))
    else:
        self._print_error(ctx, error)
async def invoke(self, ctx: Context) -> None:
    """|coro|

    Invokes the command given under the invocation context and
    handles all the internal event dispatch mechanisms.

    Parameters
    -----------
    ctx: :class:`.Context`
        The invocation context to invoke.
    """
    if ctx.command is not None:
        self.dispatch_event('command', ctx)
        try:
            # Global check-once functions gate every invocation.
            if await self.can_run(ctx, call_once=True):
                await ctx.command.invoke(ctx)
            else:
                raise CheckFailure('The global check once functions '
                                   'failed.')
        except CommandError as exc:
            # Let the command's own error machinery handle it.
            await ctx.command.dispatch_error(ctx, exc)
        else:
            self.dispatch_event('command_completion', ctx)
    elif ctx.invoked_with:
        # A prefix matched but the word after it is not a known command.
        exc = CommandNotFound('Command "{}" is not found'
                              ''.format(ctx.invoked_with))
        self.dispatch_error(ctx, exc)
async def process_commands(self, message: Message) -> None:
    """|coro|

    This function processes the commands that have been registered
    to the bot and other groups. Without this coroutine, none of the
    commands will be triggered.

    By default, this coroutine is called automatically when a new
    message is received.

    This is built using other low level tools, and is equivalent to a
    call to :meth:`~.Bot.get_context` followed by a call to
    :meth:`~.Bot.invoke`.

    Parameters
    -----------
    message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
        The message to process commands for.
    """  # noqa
    # Never react to the bot's own messages.
    if message.author.id == self.user.id:
        return

    ctx = await self.get_context(message)
    await self.invoke(ctx)
| 35.02938
| 91
| 0.584493
| 3,747
| 32,192
| 4.906592
| 0.160128
| 0.017949
| 0.009791
| 0.004079
| 0.235409
| 0.199728
| 0.184607
| 0.162143
| 0.139951
| 0.127223
| 0
| 0.000375
| 0.337351
| 32,192
| 918
| 92
| 35.067538
| 0.861476
| 0.343191
| 0
| 0.218905
| 0
| 0
| 0.050784
| 0.001322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067164
| false
| 0.012438
| 0.054726
| 0.00995
| 0.18408
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5fa6f305d9e54a79de33a61c7eebe1b7c16b303
| 657
|
py
|
Python
|
LeetCodeSolutions/python/64_Minimum_Path_Sum.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | 1
|
2017-03-27T13:38:37.000Z
|
2017-03-27T13:38:37.000Z
|
LeetCodeSolutions/python/64_Minimum_Path_Sum.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
LeetCodeSolutions/python/64_Minimum_Path_Sum.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
class Solution(object):
    """Solution to LeetCode 64 -- Minimum Path Sum."""

    def minPathSum(self, grid):
        """Return the minimal sum along a path from the top-left to the
        bottom-right cell, moving only right or down.

        :type grid: List[List[int]]
        :rtype: int
        """
        # Robustness: an empty grid has no path.
        if not grid or not grid[0]:
            return 0

        n = len(grid[0])

        # Rolling 1-D DP row instead of the full m*n table: dp[j] is the
        # best cost to reach column j of the current row.  O(n) space.
        dp = [0] * n
        dp[0] = grid[0][0]
        for j in range(1, n):
            dp[j] = dp[j - 1] + grid[0][j]

        for row in grid[1:]:
            dp[0] += row[0]
            for j in range(1, n):
                # Reach (i, j) from above (old dp[j]) or the left (dp[j-1]).
                dp[j] = row[j] + min(dp[j], dp[j - 1])

        return dp[-1]
| 31.285714
| 75
| 0.340944
| 98
| 657
| 2.27551
| 0.285714
| 0.080717
| 0.107623
| 0.143498
| 0.255605
| 0.219731
| 0.125561
| 0.125561
| 0.125561
| 0
| 0
| 0.04023
| 0.47032
| 657
| 20
| 76
| 32.85
| 0.600575
| 0.059361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5fad574122cf8647545ad83e7dc43147679cc22
| 1,129
|
py
|
Python
|
paths_win.py
|
tankbusta/rescache
|
86ca7f3fb66e28a8761f0995a300f57a73a9561d
|
[
"MIT"
] | 15
|
2015-03-05T17:03:08.000Z
|
2022-01-28T07:49:38.000Z
|
paths_win.py
|
tankbusta/rescache
|
86ca7f3fb66e28a8761f0995a300f57a73a9561d
|
[
"MIT"
] | null | null | null |
paths_win.py
|
tankbusta/rescache
|
86ca7f3fb66e28a8761f0995a300f57a73a9561d
|
[
"MIT"
] | 9
|
2015-03-06T09:56:30.000Z
|
2017-11-07T00:24:17.000Z
|
import _winreg
import os
def get_shared_cache_folder():
    """
    Look in the registry for the configured cache folder.
    If there is no entry, then we create one.
    :return: the configured CACHEFOLDER path, or None if absent
    """
    # NOTE(review): the registry connection is stashed on the module as
    # _winreg.aReg; set_shared_cache_folder() relies on this side effect
    # having happened first.
    _winreg.aReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
    try:
        key = _winreg.OpenKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
        path, _ = _winreg.QueryValueEx(key, "CACHEFOLDER")
    except OSError:
        # Key or value missing: no cache folder is configured.
        return None
    return path
def set_shared_cache_folder(folder_path):
    """
    Persist folder_path as the shared cache folder in the registry,
    creating the directory first if needed.

    Writes the CACHEFOLDER value under both the EVEONLINE and EVEPROBE
    registry keys (HKEY_CURRENT_USER).

    :param folder_path: directory to use as the shared cache
    :raises ValueError: if the directory cannot be created
    """
    if not os.path.isdir(folder_path):
        try:
            os.makedirs(folder_path)
        except OSError:
            raise ValueError("Could not create directory {}".format(folder_path))
    folder_path = os.path.normpath(folder_path) + os.sep

    # Bug fix: the original read _winreg.aReg, which only exists when
    # get_shared_cache_folder() ran earlier (it stashes the connection
    # on the module) -- otherwise this raised AttributeError.  Open our
    # own registry connection instead.
    reg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)

    key_eveonline = _winreg.CreateKey(reg, r"SOFTWARE\CCP\EVEONLINE")
    _winreg.SetValueEx(key_eveonline, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)

    key_eveprobe = _winreg.CreateKey(reg, r"SOFTWARE\CCP\EVEPROBE")
    _winreg.SetValueEx(key_eveprobe, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
def get_index_path(hint):
    """Return the index path for *hint*.

    On Windows the hint is used as-is (no platform-specific remapping).
    """
    return hint
| 30.513514
| 84
| 0.70062
| 146
| 1,129
| 5.143836
| 0.417808
| 0.106525
| 0.043941
| 0.075899
| 0.23968
| 0.23968
| 0.186418
| 0
| 0
| 0
| 0
| 0.002212
| 0.199291
| 1,129
| 36
| 85
| 31.361111
| 0.82854
| 0.092117
| 0
| 0.173913
| 0
| 0
| 0.126873
| 0.064935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0.043478
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5fd3faa9866127caab32ba61fdd34ab4ec39ea3
| 36,968
|
py
|
Python
|
pyclicker/lib/python3.7/site-packages/Xlib/display.py
|
JayRovacsek/pyautoclick
|
e136a58c129332933eb8455dd7c8e16222d54fb2
|
[
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
Xlib/display.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
Xlib/display.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# Xlib.display -- high level display object
#
# Copyright (C) 2000 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python modules
import types
# Python 2/3 compatibility.
from six import create_unbound_method
# Xlib modules
from . import error
from . import ext
from . import X
# Xlib.protocol modules
from .protocol import display as protocol_display
from .protocol import request, event, rq
# Xlib.xobjects modules
from .xobject import resource
from .xobject import drawable
from .xobject import fontable
from .xobject import colormap
from .xobject import cursor
# Maps resource-type name -> default xobject class.  _BaseDisplay copies
# this into its per-display resource_classes, which extensions may then
# override with subclasses.
_resource_baseclasses = {
    'resource': resource.Resource,
    'drawable': drawable.Drawable,
    'window': drawable.Window,
    'pixmap': drawable.Pixmap,
    'fontable': fontable.Fontable,
    'font': fontable.Font,
    'gc': fontable.GC,
    'colormap': colormap.Colormap,
    'cursor': cursor.Cursor,
}

# Maps a base resource type to the types derived from it; used by
# Display.extension_add_method to propagate an added method to every
# derived resource class as well.
_resource_hierarchy = {
    'resource': ('drawable', 'window', 'pixmap',
                 'fontable', 'font', 'gc',
                 'colormap', 'cursor'),
    'drawable': ('window', 'pixmap'),
    'fontable': ('font', 'gc')
}
class _BaseDisplay(protocol_display.Display):
    """Protocol-level display augmented with an atom-name cache."""

    resource_classes = _resource_baseclasses.copy()

    # Implement a cache of atom names, used by Window objects when
    # dealing with some ICCCM properties not defined in Xlib.Xatom
    def __init__(self, *args, **keys):
        protocol_display.Display.__init__(self, *args, **keys)
        # Maps atom name -> atom id for previously interned atoms.
        self._atom_cache = {}

    def get_atom(self, atomname, only_if_exists=0):
        """Intern *atomname*, consulting and filling the local cache."""
        if atomname in self._atom_cache:
            return self._atom_cache[atomname]

        r = request.InternAtom(display = self, name = atomname, only_if_exists = only_if_exists)

        # don't cache NONE responses in case someone creates this later
        if r.atom != X.NONE:
            self._atom_cache[atomname] = r.atom

        return r.atom
class Display(object):
def __init__(self, display = None):
    """Open the display named *display* (``$DISPLAY`` when None), build
    the keymap cache and initialise all server-supported extensions."""
    self.display = _BaseDisplay(display)

    # Create the keymap cache
    self._keymap_codes = [()] * 256
    self._keymap_syms = {}
    self._update_keymap(self.display.info.min_keycode,
                        (self.display.info.max_keycode
                         - self.display.info.min_keycode + 1))

    # Translations for keysyms to strings.
    self.keysym_translations = {}

    # Find all supported extensions
    self.extensions = []
    self.class_extension_dicts = {}
    self.display_extension_methods = {}

    # a dict that maps the event name to the code
    # or, when it's an event with a subcode, to a tuple of (event,subcode)
    # note this wraps the dict so you address it as
    # extension_event.EXTENSION_EVENT_NAME rather than
    # extension_event["EXTENSION_EVENT_NAME"]
    self.extension_event = rq.DictWrapper({})

    exts = self.list_extensions()

    # Go through all extension modules
    for extname, modname in ext.__extensions__:
        if extname in exts:
            # Import the module and fetch it
            __import__('Xlib.ext.' + modname)
            mod = getattr(ext, modname)

            info = self.query_extension(extname)
            self.display.set_extension_major(extname, info.major_opcode)

            # Call initialisation function
            mod.init(self, info)

            self.extensions.append(extname)

    # Finalize extensions by creating new classes that mix the base
    # resource classes with whatever methods the extensions added.
    for class_name, dictionary in self.class_extension_dicts.items():
        origcls = self.display.resource_classes[class_name]
        self.display.resource_classes[class_name] = type(origcls.__name__,
                                                         (origcls,),
                                                         dictionary)

    # Problem: we have already created some objects without the
    # extensions: the screen roots and default colormaps.
    # Fix that by reinstantiating them.
    for screen in self.display.info.roots:
        screen.root = self.display.resource_classes['window'](self.display, screen.root.id)
        screen.default_colormap = self.display.resource_classes['colormap'](self.display, screen.default_colormap.id)
# The next few methods are thin delegates to the protocol-level display.
def get_display_name(self):
    """Returns the name used to connect to the server, either
    provided when creating the Display object, or fetched from the
    environmental variable $DISPLAY."""
    return self.display.get_display_name()

def fileno(self):
    """Returns the file descriptor number of the underlying socket.

    This method is provided to allow Display objects to be passed
    select.select()."""
    return self.display.fileno()

def close(self):
    """Close the display, freeing the resources that it holds."""
    self.display.close()

def set_error_handler(self, handler):
    """Set the default error handler which will be called for all
    unhandled errors. handler should take two arguments as a normal
    request error handler, but the second argument (the request) will
    be None. See section Error Handling."""
    self.display.set_error_handler(handler)

def flush(self):
    """Flush the request queue, building and sending the queued
    requests. This can be necessary in applications that never wait
    for events, and in threaded applications."""
    self.display.flush()
def sync(self):
    """Flush the queue and wait until the server has processed all
    the queued requests. Use this e.g. when it is important that
    errors caused by a certain request is trapped."""
    # Do a light-weight replyrequest to sync. There must
    # be a better way to do it...
    self.get_pointer_control()

def next_event(self):
    """Return the next event. If there are no events queued, it will
    block until the next event is fetched from the server."""
    return self.display.next_event()

def pending_events(self):
    """Return the number of events queued, i.e. the number of times
    that Display.next_event() can be called without blocking."""
    return self.display.pending_events()

def has_extension(self, extension):
    """Check if both the server and the client library support the X
    extension named extension."""
    # self.extensions was populated at __init__ from list_extensions().
    return extension in self.extensions
def create_resource_object(self, type, id):
    """Create a resource object of type for the integer id. type
    should be one of the following strings:

    resource
    drawable
    window
    pixmap
    fontable
    font
    gc
    colormap
    cursor

    This function can be used when a resource ID has been fetched
    e.g. from an resource or a command line argument. Resource
    objects should never be created by instantiating the appropriate
    class directly, since any X extensions dynamically added by the
    library will not be available.
    """
    # resource_classes holds the extension-augmented classes.
    return self.display.resource_classes[type](self.display, id)

# We need this to handle display extension methods
def __getattr__(self, attr):
    """Resolve unknown attributes against the methods that extension
    modules registered for the display object."""
    try:
        function = self.display_extension_methods[attr]
        # Bind the extension function to this instance on the fly.
        return types.MethodType(function, self)
    except KeyError:
        raise AttributeError(attr)
###
### display information retrieval
###

def screen(self, sno = None):
    """Return the screen information object for screen number sno,
    or for the default screen when sno is None."""
    if sno is None:
        return self.display.info.roots[self.display.default_screen]
    else:
        return self.display.info.roots[sno]

def screen_count(self):
    """Return the total number of screens on the display."""
    return len(self.display.info.roots)

def get_default_screen(self):
    """Return the number of the default screen, extracted from the
    display name."""
    return self.display.get_default_screen()
###
### Extension module interface
###

def extension_add_method(self, object, name, function):
    """extension_add_method(object, name, function)

    Add an X extension module method. OBJECT is the type of
    object to add the function to, a string from this list:

    display
    resource
    drawable
    window
    pixmap
    fontable
    font
    gc
    colormap
    cursor

    NAME is the name of the method, a string. FUNCTION is a
    normal function whose first argument is a 'self'.
    """
    if object == 'display':
        if hasattr(self, name):
            raise AssertionError('attempting to replace display method: %s' % name)
        # Stored here and looked up by __getattr__ on attribute access.
        self.display_extension_methods[name] = function
    else:
        # Propagate the method to the named class and everything
        # derived from it (see _resource_hierarchy).
        class_list = (object, ) + _resource_hierarchy.get(object, ())
        for class_name in class_list:
            cls = _resource_baseclasses[class_name]
            if hasattr(cls, name):
                raise AssertionError('attempting to replace %s method: %s' % (class_name, name))

            method = create_unbound_method(function, cls)

            # Maybe should check extension overrides too
            try:
                self.class_extension_dicts[class_name][name] = method
            except KeyError:
                self.class_extension_dicts[class_name] = { name: method }
def extension_add_event(self, code, evt, name = None):
    """extension_add_event(code, evt, [name])

    Add an extension event. CODE is the numeric code, and EVT is
    the event class. EVT will be cloned, and the attribute _code
    of the new event class will be set to CODE.

    If NAME is omitted, it will be set to the name of EVT. This
    name is used to insert an entry in the DictWrapper
    extension_event.
    """
    # Clone the class so that setting _code does not mutate the class
    # the extension module passed in.
    newevt = type(evt.__name__, evt.__bases__,
                  evt.__dict__.copy())
    newevt._code = code

    self.display.add_extension_event(code, newevt)

    if name is None:
        name = evt.__name__

    setattr(self.extension_event, name, code)
def extension_add_subevent(self, code, subcode, evt, name = None):
    """extension_add_subevent(code, subcode, evt, [name])

    Add an extension subevent. CODE is the numeric code, subcode
    is the sub-ID of this event that shares the code ID with other
    sub-events and EVT is the event class. EVT will be cloned, and
    the attribute _code of the new event class will be set to CODE.

    If NAME is omitted, it will be set to the name of EVT. This
    name is used to insert an entry in the DictWrapper
    extension_event.
    """
    # Clone the class so that setting _code does not mutate the class
    # the extension module passed in.
    newevt = type(evt.__name__, evt.__bases__,
                  evt.__dict__.copy())
    newevt._code = code

    self.display.add_extension_event(code, newevt, subcode)

    if name is None:
        name = evt.__name__

    # store subcodes as a tuple of (event code, subcode) in the
    # extension dict maintained in the display object
    setattr(self.extension_event, name, (code,subcode))
def add_extension_error(self, code, err):
    """add_extension_error(code, err)

    Add an extension error. CODE is the numeric code, and ERR is
    the error class.
    """
    # Pure delegation to the protocol-level display.
    self.display.add_extension_error(code, err)
###
### keymap cache implementation
###

# The keycode->keysym map is stored in a list with 256 elements.
# Each element represents a keycode, and the tuple elements are
# the keysyms bound to the key.

# The keysym->keycode map is stored in a mapping, where the keys
# are keysyms. The values are a sorted list of tuples with two
# elements each: (index, keycode)
# keycode is the code for a key to which this keysym is bound, and
# index is the keysyms index in the map for that keycode.

def keycode_to_keysym(self, keycode, index):
    """Convert a keycode to a keysym, looking in entry index.
    Normally index 0 is unshifted, 1 is shifted, 2 is alt grid, and 3
    is shift+alt grid. If that key entry is not bound, X.NoSymbol is
    returned."""
    try:
        return self._keymap_codes[keycode][index]
    except IndexError:
        # Keycode out of range, or no keysym bound at that index.
        return X.NoSymbol
def keysym_to_keycode(self, keysym):
    """Look up the primary keycode that is bound to keysym. If
    several keycodes are found, the one with the lowest index and
    lowest code is returned. If keysym is not bound to any key, 0 is
    returned."""
    try:
        entries = self._keymap_syms[keysym]
        # Entries are kept sorted on (index, keycode); the first one
        # is therefore the primary binding.
        return entries[0][1]
    except (KeyError, IndexError):
        return 0
def keysym_to_keycodes(self, keysym):
    """Look up all the keycodes that is bound to keysym. A list of
    tuples (keycode, index) is returned, sorted primarily on the
    lowest index and secondarily on the lowest keycode."""
    try:
        # The cache stores (index, keycode) pairs; flip each one.
        # Bug fix: return a real list -- the previous ``map(...)``
        # produced a one-shot iterator under Python 3, contradicting
        # the documented return type above.
        return [(keycode, index) for index, keycode in self._keymap_syms[keysym]]
    except KeyError:
        return []
def refresh_keyboard_mapping(self, evt):
    """This method should be called once when a MappingNotify event
    is received, to update the keymap cache. evt should be the event
    object."""
    if isinstance(evt, event.MappingNotify):
        # Only keyboard remaps affect the keymap cache; pointer and
        # modifier remaps are ignored here.
        if evt.request == X.MappingKeyboard:
            self._update_keymap(evt.first_keycode, evt.count)
    else:
        raise TypeError('expected a MappingNotify event')
def _update_keymap(self, first_keycode, count):
    """Internal function, called to refresh the keymap cache for the
    ``count`` keycodes starting at ``first_keycode``.
    """
    # Delete all sym->code maps for the changed codes
    lastcode = first_keycode + count
    for keysym, codes in self._keymap_syms.items():
        i = 0
        while i < len(codes):
            code = codes[i][1]
            if code >= first_keycode and code < lastcode:
                del codes[i]
            else:
                # Only advance when nothing was deleted, so the element
                # shifted into slot i is not skipped.
                i = i + 1

    # Get the new keyboard mapping
    keysyms = self.get_keyboard_mapping(first_keycode, count)

    # Replace code->sym map with the new map
    self._keymap_codes[first_keycode:lastcode] = keysyms

    # Update sym->code map
    code = first_keycode
    for syms in keysyms:
        index = 0
        for sym in syms:
            if sym != X.NoSymbol:
                if sym in self._keymap_syms:
                    symcodes = self._keymap_syms[sym]
                    symcodes.append((index, code))
                    # Keep entries sorted on (index, keycode) -- the
                    # lookup methods rely on this ordering.
                    symcodes.sort()
                else:
                    self._keymap_syms[sym] = [(index, code)]
            index = index + 1
        code = code + 1
###
### client-internal keysym to string translations
###

def lookup_string(self, keysym):
    """Return a string corresponding to KEYSYM, or None if no
    reasonable translation is found.
    """
    # User-installed translations (see rebind_string) take precedence.
    s = self.keysym_translations.get(keysym)
    if s is not None:
        return s

    # Imported lazily to avoid a module-level import cycle.
    import Xlib.XK
    return Xlib.XK.keysym_to_string(keysym)
def rebind_string(self, keysym, newstring):
    """Change the translation of KEYSYM to NEWSTRING.
    If NEWSTRING is None, remove old translation if any.
    """
    if newstring is not None:
        self.keysym_translations[keysym] = newstring
        return

    # Removing a translation that never existed is not an error.
    self.keysym_translations.pop(keysym, None)
###
### X requests
###

def intern_atom(self, name, only_if_exists = 0):
    """Intern the string name, returning its atom number. If
    only_if_exists is true and the atom does not already exist, it
    will not be created and X.NONE is returned."""
    r = request.InternAtom(display = self.display,
                           name = name,
                           only_if_exists = only_if_exists)
    return r.atom

def get_atom(self, atom, only_if_exists = 0):
    """Alias for intern_atom, using internal cache"""
    # Delegates to _BaseDisplay.get_atom, which caches name -> atom.
    return self.display.get_atom(atom, only_if_exists)

def get_atom_name(self, atom):
    """Look up the name of atom, returning it as a string. Will raise
    BadAtom if atom does not exist."""
    r = request.GetAtomName(display = self.display,
                            atom = atom)
    return r.name
def get_selection_owner(self, selection):
    """Return the window that owns selection (an atom), or X.NONE if
    there is no owner for the selection. Can raise BadAtom."""
    r = request.GetSelectionOwner(display = self.display,
                                  selection = selection)
    return r.owner

def send_event(self, destination, event, event_mask = 0, propagate = 0,
               onerror = None):
    """Send a synthetic event to the window destination which can be
    a window object, or X.PointerWindow or X.InputFocus. event is the
    event object to send, instantiated from one of the classes in
    protocol.events. See XSendEvent(3X11) for details.

    There is also a Window.send_event() method."""
    request.SendEvent(display = self.display,
                      onerror = onerror,
                      propagate = propagate,
                      destination = destination,
                      event_mask = event_mask,
                      event = event)
    def ungrab_pointer(self, time, onerror = None):
        """Release a grabbed pointer and any queued events. See
        XUngrabPointer(3X11)."""
        request.UngrabPointer(display = self.display,
                              onerror = onerror,
                              time = time)
def change_active_pointer_grab(self, event_mask, cursor, time, onerror = None):
"""Change the dynamic parameters of a pointer grab. See
XChangeActivePointerGrab(3X11)."""
request.ChangeActivePointerGrab(display = self.display,
onerror = onerror,
cursor = cursor,
time = time,
event_mask = event_mask)
def ungrab_keyboard(self, time, onerror = None):
"""Ungrab a grabbed keyboard and any queued events. See
XUngrabKeyboard(3X11)."""
request.UngrabKeyboard(display = self.display,
onerror = onerror,
time = time)
def allow_events(self, mode, time, onerror = None):
"""Release some queued events. mode should be one of
X.AsyncPointer, X.SyncPointer, X.AsyncKeyboard, X.SyncKeyboard,
X.ReplayPointer, X.ReplayKeyboard, X.AsyncBoth, or X.SyncBoth.
time should be a timestamp or X.CurrentTime."""
request.AllowEvents(display = self.display,
onerror = onerror,
mode = mode,
time = time)
def grab_server(self, onerror = None):
"""Disable processing of requests on all other client connections
until the server is ungrabbed. Server grabbing should be avoided
as much as possible."""
request.GrabServer(display = self.display,
onerror = onerror)
def ungrab_server(self, onerror = None):
"""Release the server if it was previously grabbed by this client."""
request.UngrabServer(display = self.display,
onerror = onerror)
def warp_pointer(self, x, y, src_window = X.NONE, src_x = 0, src_y = 0,
src_width = 0, src_height = 0, onerror = None):
"""Move the pointer relative its current position by the offsets
(x, y). However, if src_window is a window the pointer is only
moved if the specified rectangle in src_window contains it. If
src_width is 0 it will be replaced with the width of src_window -
src_x. src_height is treated in a similar way.
To move the pointer to absolute coordinates, use Window.warp_pointer()."""
request.WarpPointer(display = self.display,
onerror = onerror,
src_window = src_window,
dst_window = X.NONE,
src_x = src_x,
src_y = src_y,
src_width = src_width,
src_height = src_height,
dst_x = x,
dst_y = y)
def set_input_focus(self, focus, revert_to, time, onerror = None):
"""Set input focus to focus, which should be a window,
X.PointerRoot or X.NONE. revert_to specifies where the focus
reverts to if the focused window becomes not visible, and should
be X.RevertToParent, RevertToPointerRoot, or RevertToNone. See
XSetInputFocus(3X11) for details.
There is also a Window.set_input_focus()."""
request.SetInputFocus(display = self.display,
onerror = onerror,
revert_to = revert_to,
focus = focus,
time = time)
def get_input_focus(self):
"""Return an object with the following attributes:
focus
The window which currently holds the input
focus, X.NONE or X.PointerRoot.
revert_to
Where the focus will revert, one of X.RevertToParent,
RevertToPointerRoot, or RevertToNone. """
return request.GetInputFocus(display = self.display)
def query_keymap(self):
"""Return a bit vector for the logical state of the keyboard,
where each bit set to 1 indicates that the corresponding key is
currently pressed down. The vector is represented as a list of 32
integers. List item N contains the bits for keys 8N to 8N + 7
with the least significant bit in the byte representing key 8N."""
r = request.QueryKeymap(display = self.display)
return r.map
    def open_font(self, name):
        """Open the font identified by the pattern name and return its
        font object. If name does not match any font, None is returned."""
        fid = self.display.allocate_resource_id()
        # Trap BadName so an unmatched pattern can be reported as None
        # instead of raising.
        ec = error.CatchError(error.BadName)
        request.OpenFont(display = self.display,
                         onerror = ec,
                         fid = fid,
                         name = name)
        # The request is asynchronous; sync so we learn whether it failed.
        self.sync()
        if ec.get_error():
            # Open failed: release the resource id reserved for the font.
            self.display.free_resource_id(fid)
            return None
        else:
            cls = self.display.get_resource_class('font', fontable.Font)
            return cls(self.display, fid, owner = 1)
def list_fonts(self, pattern, max_names):
"""Return a list of font names matching pattern. No more than
max_names will be returned."""
r = request.ListFonts(display = self.display,
max_names = max_names,
pattern = pattern)
return r.fonts
def list_fonts_with_info(self, pattern, max_names):
"""Return a list of fonts matching pattern. No more than
max_names will be returned. Each list item represents one font
and has the following properties:
name
The name of the font.
min_bounds
max_bounds
min_char_or_byte2
max_char_or_byte2
default_char
draw_direction
min_byte1
max_byte1
all_chars_exist
font_ascent
font_descent
replies_hint
See the descripton of XFontStruct in XGetFontProperty(3X11)
for details on these values.
properties
A list of properties. Each entry has two attributes:
name
The atom identifying this property.
value
A 32-bit unsigned value.
"""
return request.ListFontsWithInfo(display = self.display,
max_names = max_names,
pattern = pattern)
def set_font_path(self, path, onerror = None):
"""Set the font path to path, which should be a list of strings.
If path is empty, the default font path of the server will be
restored."""
request.SetFontPath(display = self.display,
onerror = onerror,
path = path)
def get_font_path(self):
"""Return the current font path as a list of strings."""
r = request.GetFontPath(display = self.display)
return r.paths
def query_extension(self, name):
"""Ask the server if it supports the extension name. If it is
supported an object with the following attributes is returned:
major_opcode
The major opcode that the requests of this extension uses.
first_event
The base event code if the extension have additional events, or 0.
first_error
The base error code if the extension have additional errors, or 0.
If the extension is not supported, None is returned."""
r = request.QueryExtension(display = self.display,
name = name)
if r.present:
return r
else:
return None
def list_extensions(self):
"""Return a list of all the extensions provided by the server."""
r = request.ListExtensions(display = self.display)
return r.names
def change_keyboard_mapping(self, first_keycode, keysyms, onerror = None):
"""Modify the keyboard mapping, starting with first_keycode.
keysyms is a list of tuples of keysyms. keysyms[n][i] will be
assigned to keycode first_keycode+n at index i."""
request.ChangeKeyboardMapping(display = self.display,
onerror = onerror,
first_keycode = first_keycode,
keysyms = keysyms)
    def get_keyboard_mapping(self, first_keycode, count):
        """Return the current keyboard mapping as a list of tuples,
        starting at first_keycode and covering no more than count
        keycodes."""
        r = request.GetKeyboardMapping(display = self.display,
                                       first_keycode = first_keycode,
                                       count = count)
        return r.keysyms
def change_keyboard_control(self, onerror = None, **keys):
"""Change the parameters provided as keyword arguments:
key_click_percent
The volume of key clicks between 0 (off) and 100 (load).
-1 will restore default setting.
bell_percent
The base volume of the bell, coded as above.
bell_pitch
The pitch of the bell in Hz, -1 restores the default.
bell_duration
The duration of the bell in milliseconds, -1 restores
the default.
led
led_mode
led_mode should be X.LedModeOff or X.LedModeOn. If led is
provided, it should be a 32-bit mask listing the LEDs that
should change. If led is not provided, all LEDs are changed.
key
auto_repeat_mode
auto_repeat_mode should be one of X.AutoRepeatModeOff,
X.AutoRepeatModeOn, or X.AutoRepeatModeDefault. If key is
provided, that key will be modified, otherwise the global
state for the entire keyboard will be modified."""
request.ChangeKeyboardControl(display = self.display,
onerror = onerror,
attrs = keys)
def get_keyboard_control(self):
"""Return an object with the following attributes:
global_auto_repeat
X.AutoRepeatModeOn or X.AutoRepeatModeOff.
auto_repeats
A list of 32 integers. List item N contains the bits for keys
8N to 8N + 7 with the least significant bit in the byte
representing key 8N. If a bit is on, autorepeat is enabled
for the corresponding key.
led_mask
A 32-bit mask indicating which LEDs are on.
key_click_percent
The volume of key click, from 0 to 100.
bell_percent
bell_pitch
bell_duration
The volume, pitch and duration of the bell. """
return request.GetKeyboardControl(display = self.display)
def bell(self, percent = 0, onerror = None):
"""Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent)
def change_pointer_control(self, accel = None, threshold = None, onerror = None):
"""To change the pointer acceleration, set accel to a tuple (num,
denum). The pointer will then move num/denum times the normal
speed if it moves beyond the threshold number of pixels at once.
To change the threshold, set it to the number of pixels. -1
restores the default."""
if accel is None:
do_accel = 0
accel_num = 0
accel_denum = 0
else:
do_accel = 1
accel_num, accel_denum = accel
if threshold is None:
do_threshold = 0
else:
do_threshold = 1
request.ChangePointerControl(display = self.display,
onerror = onerror,
do_accel = do_accel,
do_thres = do_threshold,
accel_num = accel_num,
accel_denum = accel_denum,
threshold = threshold)
def get_pointer_control(self):
"""Return an object with the following attributes:
accel_num
accel_denom
The acceleration as numerator/denumerator.
threshold
The number of pixels the pointer must move before the
acceleration kicks in."""
return request.GetPointerControl(display = self.display)
def set_screen_saver(self, timeout, interval, prefer_blank, allow_exposures, onerror = None):
"""See XSetScreenSaver(3X11)."""
request.SetScreenSaver(display = self.display,
onerror = onerror,
timeout = timeout,
interval = interval,
prefer_blank = prefer_blank,
allow_exposures = allow_exposures)
def get_screen_saver(self):
"""Return an object with the attributes timeout, interval,
prefer_blanking, allow_exposures. See XGetScreenSaver(3X11) for
details."""
return request.GetScreenSaver(display = self.display)
def change_hosts(self, mode, host_family, host, onerror = None):
"""mode is either X.HostInsert or X.HostDelete. host_family is
one of X.FamilyInternet, X.FamilyDECnet or X.FamilyChaos.
host is a list of bytes. For the Internet family, it should be the
four bytes of an IPv4 address."""
request.ChangeHosts(display = self.display,
onerror = onerror,
mode = mode,
host_family = host_family,
host = host)
def list_hosts(self):
"""Return an object with the following attributes:
mode
X.EnableAccess if the access control list is used, X.DisableAccess otherwise.
hosts
The hosts on the access list. Each entry has the following attributes:
family
X.FamilyInternet, X.FamilyDECnet, or X.FamilyChaos.
name
A list of byte values, the coding depends on family. For the Internet family, it is the 4 bytes of an IPv4 address.
"""
return request.ListHosts(display = self.display)
def set_access_control(self, mode, onerror = None):
"""Enable use of access control lists at connection setup if mode
is X.EnableAccess, disable if it is X.DisableAccess."""
request.SetAccessControl(display = self.display,
onerror = onerror,
mode = mode)
def set_close_down_mode(self, mode, onerror = None):
"""Control what will happen with the client's resources at
connection close. The default is X.DestroyAll, the other values
are X.RetainPermanent and X.RetainTemporary."""
request.SetCloseDownMode(display = self.display,
onerror = onerror,
mode = mode)
def force_screen_saver(self, mode, onerror = None):
"""If mode is X.ScreenSaverActive the screen saver is activated.
If it is X.ScreenSaverReset, the screen saver is deactivated as
if device input had been received."""
request.ForceScreenSaver(display = self.display,
onerror = onerror,
mode = mode)
def set_pointer_mapping(self, map):
"""Set the mapping of the pointer buttons. map is a list of
logical button numbers. map must be of the same length as the
list returned by Display.get_pointer_mapping().
map[n] sets the
logical number for the physical button n+1. Logical number 0
disables the button. Two physical buttons cannot be mapped to the
same logical number.
If one of the buttons to be altered are
logically in the down state, X.MappingBusy is returned and the
mapping is not changed. Otherwise the mapping is changed and
X.MappingSuccess is returned."""
r = request.SetPointerMapping(display = self.display,
map = map)
return r.status
def get_pointer_mapping(self):
"""Return a list of the pointer button mappings. Entry N in the
list sets the logical button number for the physical button N+1."""
r = request.GetPointerMapping(display = self.display)
return r.map
def set_modifier_mapping(self, keycodes):
"""Set the keycodes for the eight modifiers X.Shift, X.Lock,
X.Control, X.Mod1, X.Mod2, X.Mod3, X.Mod4 and X.Mod5. keycodes
should be a eight-element list where each entry is a list of the
keycodes that should be bound to that modifier.
If any changed
key is logically in the down state, X.MappingBusy is returned and
the mapping is not changed. If the mapping violates some server
restriction, X.MappingFailed is returned. Otherwise the mapping
is changed and X.MappingSuccess is returned."""
r = request.SetModifierMapping(display = self.display,
keycodes = keycodes)
return r.status
def get_modifier_mapping(self):
"""Return a list of eight lists, one for each modifier. The list
can be indexed using X.ShiftMapIndex, X.Mod1MapIndex, and so on.
The sublists list the keycodes bound to that modifier."""
r = request.GetModifierMapping(display = self.display)
return r.keycodes
def no_operation(self, onerror = None):
"""Do nothing but send a request to the server."""
request.NoOperation(display = self.display,
onerror = onerror)
| 38.872766
| 123
| 0.599681
| 4,461
| 36,968
| 4.866174
| 0.171038
| 0.039525
| 0.033168
| 0.024185
| 0.215819
| 0.148747
| 0.113645
| 0.092731
| 0.072968
| 0.05187
| 0
| 0.005636
| 0.332801
| 36,968
| 950
| 124
| 38.913684
| 0.874478
| 0.425422
| 0
| 0.18797
| 0
| 0
| 0.015041
| 0
| 0
| 0
| 0
| 0
| 0.005013
| 1
| 0.172932
| false
| 0.002506
| 0.035088
| 0
| 0.325815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5fd49cbaa7ded4b224914739446f1a0434a93af
| 657
|
py
|
Python
|
Others/qupc/qupc2014/c/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2
|
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
Others/qupc/qupc2014/c/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961
|
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
Others/qupc/qupc2014/c/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def main():
    """Read an n x m letter grid and report each queried letter's position."""
    from string import ascii_uppercase

    n, m, q_large = map(int, input().split())
    grid = [list(input()) for _ in range(n)]
    queries = [input() for _ in range(q_large)]

    # positions[k] holds the 1-based (row, col) of letter chr(ord('A') + k),
    # or None when that letter does not appear in the grid.
    positions = [None] * 26
    for row in range(n):
        for col in range(m):
            cell = grid[row][col]
            if cell == "*":
                continue
            positions[ascii_uppercase.index(cell)] = (row + 1, col + 1)

    for query in queries:
        found = positions[ascii_uppercase.index(query)]
        if found is None:
            print("NA")
        else:
            print(found[0], found[1])


if __name__ == "__main__":
    main()
| 21.193548
| 50
| 0.47032
| 93
| 657
| 3.150538
| 0.430108
| 0.119454
| 0.102389
| 0.102389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017073
| 0.375951
| 657
| 30
| 51
| 21.9
| 0.697561
| 0.031963
| 0
| 0
| 0
| 0
| 0.01735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.095238
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5fedd3bf29602a1334d3dbff567321747bbca26
| 4,652
|
py
|
Python
|
analysis/notebooks/helper/anova.py
|
dpedrosac/DBSgait
|
6df44cf975d43f9e932ef10144bfb7c1b5390b7b
|
[
"MIT"
] | 1
|
2021-09-29T05:53:38.000Z
|
2021-09-29T05:53:38.000Z
|
analysis/notebooks/helper/anova.py
|
dpedrosac/DBSgait
|
6df44cf975d43f9e932ef10144bfb7c1b5390b7b
|
[
"MIT"
] | null | null | null |
analysis/notebooks/helper/anova.py
|
dpedrosac/DBSgait
|
6df44cf975d43f9e932ef10144bfb7c1b5390b7b
|
[
"MIT"
] | 1
|
2021-09-22T08:48:47.000Z
|
2021-09-22T08:48:47.000Z
|
import numpy as np
import pandas as pd
from scipy.stats import f_oneway
from typing import Dict, Tuple, Set
def extract_significant_p(df: pd.DataFrame, p_value_limit: float):
    """Filter *df* down to its significant p-values.

    Selects the "p-value" column group, replaces every value not strictly
    below *p_value_limit* with NaN, and drops rows in which no value at
    all is significant.

    Parameters
    ----------
    df
        DataFrame whose columns are a MultiIndex with "p-value" as the
        first level.
    p_value_limit
        Exclusive upper bound for a p-value to count as significant.
    """
    # The literals were previously spelled as f-strings with nothing to
    # interpolate; plain strings are equivalent and clearer.
    return (
        df.loc(axis=1)["p-value"]
        .where(df["p-value"] < p_value_limit)
        .dropna(axis=0, how="all")
    )
def _calculate_anova(data: pd.DataFrame) -> Tuple:
"""Calculate one-way anova using each column as a different measurement."""
parameter = [column for column in data.columns if column != "configuration"][0]
data_ = [
data[data["configuration"] == configuration][parameter].T.to_numpy()
for configuration in set(data["configuration"])
]
return f_oneway(*data_)
def anova(
    dataset: Dict, gait_test: str, gait_parameter: str
) -> Tuple[pd.DataFrame, Set]:
    """Calculate a one-way anova for a single gait test and gait parameter.
    Parameters
    ----------
    dataset
        A dictionary, where the keys are descriptions for different subjects. The values are dataframes, which have a
        pd.MultiIndex as columns. The first level describes the test paradigm, e.g. "slow" / "fast". The second level
        describes the DBS configuration, e.g. "130", "100", "OFF". The third level is the gait parameter,
        e.g. stride length.
    gait_test
        Used to select the first level of the columns
    gait_parameter
        Used to select the third level of the columns
    Returns
    -------
    d
        A dictionary where the keys are equal to the passed argument `dataset`. The values are dataframes,
        where the columns correspond to the two feet and the rows are different gait parameters. The values are anova
        p-values between all DBS configurations and the OFF state for this specific `gait_test`
    """
    anova_dict = {}
    anova_df = pd.DataFrame()
    not_evaluated = []
    for patient, patient_data in dataset.items():
        # Default to (statistic, p-value) placeholders for feet that
        # cannot be evaluated.
        anova_dict[patient] = {"LeftFoot": (None, None), "RightFoot": (None, None)}
        for foot in set(patient_data["foot"]):
            # NOTE(review): missing_condition is assigned but never used.
            missing_condition = None
            foot_data = patient_data[
                (patient_data["foot"] == foot) & (patient_data["test"] == gait_test)
            ][[gait_parameter, "configuration"]]
            possible_configurations = {
                "030",
                "033",
                "040",
                "066",
                "085",
                "090",
                "100",
                "130",
                "OFF",
            }
            actual_configurations = set(foot_data["configuration"])
            missing_configurations = possible_configurations - actual_configurations
            if missing_configurations:
                not_evaluated.append(
                    " ".join([gait_test, patient, *missing_configurations, foot])
                )
                # An ANOVA needs at least two groups; skip the foot when
                # fewer than two configurations were recorded.
                if len(missing_configurations) > (len(possible_configurations) - 2):
                    print(
                        "Not evaluating this foot, because to few configurations available."
                    )
                    continue
            # print(set(foot_data.columns) - set(foot_data_valid.columns))
            anova_dict[patient][foot] = _calculate_anova(foot_data)
        # Collect only the p-value (index 1 of the f_oneway result) per foot.
        row = pd.DataFrame(
            index=[patient],
            columns=pd.MultiIndex.from_arrays(
                [["p-value"] * 2, ["LeftFoot", "RightFoot"]]
            ),
            data=[
                [
                    anova_dict[patient]["LeftFoot"][1],
                    anova_dict[patient]["RightFoot"][1],
                ]
            ],
        )
        anova_df = pd.concat([anova_df, row])
    return anova_df, set(not_evaluated)
def conclude_results(
    all_results: pd.DataFrame,
    p_value_limit: float
) -> pd.DataFrame:
    """Summarise, per gait parameter, how many patients and feet reached
    significance at *p_value_limit*."""
    summary_columns = ["n_patients", "n_patients_significant", "n_feet_significant"]
    summary_rows = []
    for gait_parameter in all_results.keys():
        parameter_results = all_results[gait_parameter]
        significant = extract_significant_p(
            parameter_results, p_value_limit=p_value_limit
        )
        counts = [
            len(parameter_results),
            len(significant),
            significant.count().sum(),
        ]
        summary_rows.append(
            pd.DataFrame(data=[counts], columns=summary_columns, index=[gait_parameter])
        )
    if not summary_rows:
        return pd.DataFrame()
    return pd.concat(summary_rows)
| 36.34375
| 117
| 0.589209
| 521
| 4,652
| 5.084453
| 0.289827
| 0.05889
| 0.024915
| 0.035108
| 0.040015
| 0.040015
| 0
| 0
| 0
| 0
| 0
| 0.011501
| 0.308469
| 4,652
| 127
| 118
| 36.629921
| 0.811937
| 0.239897
| 0
| 0.022222
| 0
| 0
| 0.085872
| 0.006382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.044444
| 0
| 0.133333
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5ff5d19a2e1fbd8c3dcb000fc779bc359c47c61
| 2,251
|
py
|
Python
|
bux_recorder/utils.py
|
roaldarbol/bux
|
356817bbc7139c972d640c64fb8fcba27b70b3f7
|
[
"MIT"
] | null | null | null |
bux_recorder/utils.py
|
roaldarbol/bux
|
356817bbc7139c972d640c64fb8fcba27b70b3f7
|
[
"MIT"
] | 9
|
2021-12-09T18:07:25.000Z
|
2022-03-30T23:22:45.000Z
|
bux_recorder/utils.py
|
roaldarbol/bux
|
356817bbc7139c972d640c64fb8fcba27b70b3f7
|
[
"MIT"
] | null | null | null |
import os
import platform
import time
import csv
import serial
import cv2
import tkinter as tk
from tkinter.filedialog import askdirectory
from serial.tools import list_ports
# From https://raspberrypi.stackexchange.com/a/118473
def is_raspberrypi():
    """Return True when running on a Raspberry Pi, else False.

    Reads the device-tree model string exposed by the firmware; on
    non-Pi systems the file does not exist and the check falls through
    to False.
    """
    try:
        # Bug fix: the original called io.open() without importing io,
        # raising NameError whenever this path was reached. The builtin
        # open() is the same function.
        with open('/sys/firmware/devicetree/base/model', 'r') as m:
            if 'raspberry pi' in m.read().lower():
                # Return a plain boolean instead of the (truthy) file
                # object the original returned.
                return True
    except Exception:
        pass
    return False
def get_platform():
    """Return the current OS name as reported by platform.system()."""
    system_name = platform.system()
    return system_name
def get_gui_coordinates(root, w, h):
    """Return (w, h, x, y) placing a w-by-h window at the screen centre.

    root must be a Tk root (anything with winfo_screenwidth/-height).
    """
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    # Centre: offset each axis by half the screen minus half the window.
    x = (screen_w / 2) - (w / 2)
    y = (screen_h / 2) - (h / 2)
    return (w, h, x, y)
def handle_focus_in(button):
    # Clear the placeholder text and switch to normal colour when the
    # entry gains focus.  The `button` parameter is unused.
    # NOTE(review): relies on a module-global `full_name_entry` that is
    # not defined anywhere in this file -- confirm it is set elsewhere.
    full_name_entry.delete(0, tk.END)
    full_name_entry.config(fg='black')
def handle_focus_out(button):
    # Restore the greyed-out placeholder when the entry loses focus.
    # The `button` parameter is unused.
    # NOTE(review): relies on a module-global `full_name_entry` that is
    # not defined anywhere in this file -- confirm it is set elsewhere.
    full_name_entry.delete(0, tk.END)
    full_name_entry.config(fg='grey')
    full_name_entry.insert(0, "Example: Joe Bloggs")
def hover(button, enter, message):
    """Set *button*'s text to *message*; an empty message is a no-op.

    *enter* is accepted for tkinter event-binding compatibility but is
    not used.
    """
    if message != "":
        button.configure(text=message)
def list_ports():
    """
    Test the ports and returns a tuple with the available ports and the ones that are working.
    """
    # NOTE(review): this function shadows `serial.tools.list_ports`
    # imported at the top of the module -- confirm that import is still
    # needed elsewhere or rename one of the two.
    non_working_ports = []
    dev_port = 0
    working_ports = []
    available_ports = []
    while len(non_working_ports) < 6: # if there are more than 5 non working ports stop the testing.
        camera = cv2.VideoCapture(dev_port)
        if not camera.isOpened():
            non_working_ports.append(dev_port)
            # print("Port %s is not working." %dev_port)
        else:
            is_reading, img = camera.read()
            w = camera.get(3)
            h = camera.get(4)
            if is_reading:
                # print("Port %s is working and reads images (%s x %s)" %(dev_port,h,w))
                working_ports.append(dev_port)
            else:
                # print("Port %s for camera ( %s x %s) is present but does not reads." %(dev_port,h,w))
                available_ports.append(dev_port)
        # Bug fix: release the capture handle so probed devices are not
        # left open for the lifetime of the process.
        camera.release()
        dev_port +=1
    return available_ports,working_ports,non_working_ports
| 30.835616
| 103
| 0.631719
| 322
| 2,251
| 4.270186
| 0.406832
| 0.045818
| 0.047273
| 0.039273
| 0.112
| 0.075636
| 0.075636
| 0.075636
| 0.075636
| 0.075636
| 0
| 0.012666
| 0.263438
| 2,251
| 73
| 104
| 30.835616
| 0.816647
| 0.234118
| 0
| 0.089286
| 0
| 0
| 0.04468
| 0.020576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.017857
| 0.160714
| 0.017857
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5fff25cf4828ce6ee852dfb013719288c2e6acf
| 1,712
|
py
|
Python
|
a2e/optimizer/hpbandster/_model_worker.py
|
maechler/a2e
|
c28f546ca5fc3fdb9c740ea5f0f85d2aca044a00
|
[
"MIT"
] | 1
|
2021-03-19T09:09:41.000Z
|
2021-03-19T09:09:41.000Z
|
a2e/optimizer/hpbandster/_model_worker.py
|
maechler/a2e
|
c28f546ca5fc3fdb9c740ea5f0f85d2aca044a00
|
[
"MIT"
] | null | null | null |
a2e/optimizer/hpbandster/_model_worker.py
|
maechler/a2e
|
c28f546ca5fc3fdb9c740ea5f0f85d2aca044a00
|
[
"MIT"
] | null | null | null |
from hpbandster.core.worker import Worker
from a2e.model import AbstractModel
from a2e.optimizer import EvaluationResultAggregator
from a2e.utility import inf_nan_to_float_max
class ModelWorker(Worker):
    """hpbandster Worker that evaluates an AbstractModel on fixed train
    and validation splits and forwards every evaluation result to an
    EvaluationResultAggregator."""
    def __init__(
        self,
        model: AbstractModel,
        evaluation_result_aggregator: EvaluationResultAggregator,
        x_train,
        y_train,
        x_valid,
        y_valid,
        run_id,
        nameserver=None,
        nameserver_port=None,
        logger=None,
        host=None,
        id=None,
        timeout=None,
    ):
        # All arguments from run_id onward are passed straight through
        # to the hpbandster Worker base class.
        super().__init__(run_id, nameserver=nameserver, nameserver_port=nameserver_port, logger=logger, host=host, id=id, timeout=timeout)
        self.model = model
        self.evaluation_result_aggregator = evaluation_result_aggregator
        self.x_train = x_train
        self.y_train = y_train
        self.x_valid = x_valid
        self.y_valid = y_valid
    def compute(self, config, budget, working_directory, **kwargs):
        """Evaluate one hyperparameter configuration at the given budget.

        Loads `config` into the model, evaluates on the stored splits,
        records iteration/stage/config metadata on the result, hands it
        to the aggregator, and returns the dict hpbandster expects
        ('loss' and 'info').
        """
        # hpbandster encodes the config identity as a triple.
        iteration, stage, actual_num_config = kwargs['config_id']
        self.model.load_config(config, budget=budget, **kwargs)
        evaluation_result = self.model.evaluate(
            self.x_train,
            self.y_train,
            self.x_valid,
            self.y_valid,
            budget,
        )
        evaluation_result.add_info('iteration', iteration)
        evaluation_result.add_info('stage', stage)
        evaluation_result.add_info('actual_num_config', actual_num_config)
        self.evaluation_result_aggregator.add_evaluation_result(evaluation_result)
        return {
            # Guard against inf/NaN costs, which hpbandster cannot handle.
            'loss': inf_nan_to_float_max(evaluation_result.cost),
            'info': evaluation_result.info,
        }
| 31.703704
| 138
| 0.657126
| 195
| 1,712
| 5.430769
| 0.276923
| 0.181303
| 0.098206
| 0.065156
| 0.109537
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002377
| 0.26285
| 1,712
| 53
| 139
| 32.301887
| 0.836767
| 0
| 0
| 0
| 0
| 0
| 0.028037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9101c6f835f7bccd8700b747bc71f0d2474bb905
| 1,245
|
py
|
Python
|
xagents/__init__.py
|
schissmantics/xagents
|
04f1b96f767903c62138b7d63986f16edfe5f240
|
[
"MIT"
] | 37
|
2021-08-05T16:31:54.000Z
|
2022-01-16T11:49:46.000Z
|
xagents/__init__.py
|
schissmantics/xagents
|
04f1b96f767903c62138b7d63986f16edfe5f240
|
[
"MIT"
] | 1
|
2022-01-08T17:22:53.000Z
|
2022-01-08T17:22:53.000Z
|
xagents/__init__.py
|
schissmantics/xagents
|
04f1b96f767903c62138b7d63986f16edfe5f240
|
[
"MIT"
] | 3
|
2021-08-13T06:25:22.000Z
|
2021-08-20T01:37:15.000Z
|
from xagents import a2c, acer, ddpg, dqn, ppo, td3, trpo
from xagents.a2c.agent import A2C
from xagents.acer.agent import ACER
from xagents.base import OffPolicy
from xagents.ddpg.agent import DDPG
from xagents.dqn.agent import DQN
from xagents.ppo.agent import PPO
from xagents.td3.agent import TD3
from xagents.trpo.agent import TRPO
from xagents.utils.cli import play_args, train_args, tune_args
from xagents.utils.common import register_models
__author__ = 'schissmantics'
__email__ = 'schissmantics@outlook.com'
__license__ = 'MIT'
__version__ = '1.0.1'
# Registry mapping agent name -> its implementation module and agent class.
agents = {
    'a2c': {'module': a2c, 'agent': A2C},
    'acer': {'module': acer, 'agent': ACER},
    'dqn': {'module': dqn, 'agent': DQN},
    'ppo': {'module': ppo, 'agent': PPO},
    'td3': {'module': td3, 'agent': TD3},
    'trpo': {'module': trpo, 'agent': TRPO},
    'ddpg': {'module': ddpg, 'agent': DDPG},
}
register_models(agents)
# CLI subcommands: name -> (argument spec, method name, help text).
# NOTE(review): the second element is presumably the agent method the
# dispatcher invokes ('fit', 'play', '' for tune) -- verify against the
# CLI entry point.
commands = {
    'train': (train_args, 'fit', 'Train given an agent and environment'),
    'play': (
        play_args,
        'play',
        'Play a game given a trained agent and environment',
    ),
    'tune': (
        tune_args,
        '',
        'Tune hyperparameters given an agent, hyperparameter specs, and environment',
    ),
}
| 30.365854
| 85
| 0.658635
| 162
| 1,245
| 4.91358
| 0.283951
| 0.15201
| 0.037688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01497
| 0.195181
| 1,245
| 40
| 86
| 31.125
| 0.779441
| 0
| 0
| 0.052632
| 0
| 0
| 0.261847
| 0.02008
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.289474
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91031df628ba0d6e12adf7ed9e0154be2d4256a3
| 3,794
|
py
|
Python
|
examples/MMPT/mmpt_cli/localjob.py
|
Este1le/fairseq
|
0fa073e0e0ddd90ff6850588e655c9566bb222ff
|
[
"MIT"
] | null | null | null |
examples/MMPT/mmpt_cli/localjob.py
|
Este1le/fairseq
|
0fa073e0e0ddd90ff6850588e655c9566bb222ff
|
[
"MIT"
] | null | null | null |
examples/MMPT/mmpt_cli/localjob.py
|
Este1le/fairseq
|
0fa073e0e0ddd90ff6850588e655c9566bb222ff
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mmpt.utils import recursive_config
class BaseJob(object):
    """Common scaffolding for jobs driven by a yaml config file."""

    def __init__(self, yaml_file, dryrun=False):
        self.yaml_file = yaml_file
        self.config = recursive_config(yaml_file)
        self.dryrun = dryrun

    def submit(self, **kwargs):
        """Launch the job; concrete subclasses must implement this."""
        raise NotImplementedError

    def _normalize_cmd(self, cmd_list):
        """Return a copy of cmd_list with the "[yaml]" placeholder
        replaced by this job's yaml file path."""
        normalized = list(cmd_list)
        placeholder_index = normalized.index("[yaml]")
        normalized[placeholder_index] = self.yaml_file
        return normalized
class LocalJob(BaseJob):
    """Job that runs fairseq training or prediction as a local command."""

    # Command templates per job type; "[yaml]" is replaced with the
    # job's config path by BaseJob._normalize_cmd.
    CMD_CONFIG = {
        "local_single": [
            "fairseq-train", "[yaml]", "--user-dir", "mmpt",
            "--task", "mmtask", "--arch", "mmarch",
            "--criterion", "mmloss",
        ],
        "local_small": [
            "fairseq-train", "[yaml]", "--user-dir", "mmpt",
            "--task", "mmtask", "--arch", "mmarch",
            "--criterion", "mmloss",
            "--distributed-world-size", "2"
        ],
        "local_big": [
            "fairseq-train", "[yaml]", "--user-dir", "mmpt",
            "--task", "mmtask", "--arch", "mmarch",
            "--criterion", "mmloss",
            "--distributed-world-size", "4"
        ],
        "local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
    }

    def __init__(self, yaml_file, job_type=None, dryrun=False):
        super().__init__(yaml_file, dryrun)
        if job_type is None:
            # No explicit type: default, then let the config override it.
            self.job_type = "local_single"
            if self.config.task_type is not None:
                self.job_type = self.config.task_type
        else:
            self.job_type = job_type
        if self.job_type in ["local_single", "local_small"]:
            if self.config.fairseq.dataset.batch_size > 32:
                # NOTE(review): only warns -- the batch size is not
                # actually decreased. Confirm whether that is intended.
                print("decreasing batch_size to 32 for local testing?")

    def submit(self):
        """Build the command line for this job type and run it.

        For training jobs, flattens config.fairseq into CLI flags.
        Returns a JobStatus with a placeholder job id."""
        cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
        if "predict" not in self.job_type:
            # append fairseq args.
            from mmpt.utils import load_config
            config = load_config(config_file=self.yaml_file)
            for field in config.fairseq:
                for key in config.fairseq[field]:
                    if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]: # a list of binary flag.
                        param = ["--" + key.replace("_", "-")]
                    else:
                        if key == "lr":
                            # fairseq takes a single lr value; use the first.
                            value = str(config.fairseq[field][key][0])
                        elif key == "adam_betas":
                            # Quote the tuple so the shell passes it intact.
                            value = "'"+str(config.fairseq[field][key])+"'"
                        else:
                            value = str(config.fairseq[field][key])
                        param = [
                            "--" + key.replace("_", "-"),
                            value
                        ]
                    cmd_list.extend(param)

        print("launching", " ".join(cmd_list))
        if not self.dryrun:
            # NOTE(review): runs through the shell via os.system; the
            # command is built from the local config, not external input.
            os.system(" ".join(cmd_list))
        return JobStatus("12345678")
class JobStatus(object):
    """Minimal job-status object with a Future-like interface.

    Local jobs are fire-and-forget, so done()/running() are fixed and
    result() always reports the job as running.
    """

    def __init__(self, job_id):
        self.job_id = job_id

    def __repr__(self):
        return self.job_id

    def __str__(self):
        return self.job_id

    def done(self):
        return False

    def running(self):
        return False

    def result(self):
        if not self.done():
            return "{} is running.".format(self.job_id)
        return "{} is done.".format(self.job_id)

    def stderr(self):
        # Same text as result(); no separate stream is captured.
        return self.result()

    def stdout(self):
        return self.result()
| 32.152542
| 120
| 0.524512
| 419
| 3,794
| 4.536993
| 0.293556
| 0.044187
| 0.034719
| 0.031562
| 0.207785
| 0.187796
| 0.118885
| 0.118885
| 0.118885
| 0.118885
| 0
| 0.006822
| 0.343173
| 3,794
| 117
| 121
| 32.42735
| 0.756019
| 0.055878
| 0
| 0.241758
| 0
| 0
| 0.155245
| 0.013427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.032967
| 0.065934
| 0.32967
| 0.021978
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9103b4aa5d2e6a5156212d03a9f3245d1c26b5fe
| 1,154
|
py
|
Python
|
tron/Nubs/deprecated/tcc25m-old.py
|
sdss/tron
|
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
|
[
"BSD-3-Clause"
] | null | null | null |
tron/Nubs/deprecated/tcc25m-old.py
|
sdss/tron
|
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
|
[
"BSD-3-Clause"
] | null | null | null |
tron/Nubs/deprecated/tcc25m-old.py
|
sdss/tron
|
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
|
[
"BSD-3-Clause"
] | null | null | null |
import os.path
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.TCCShellNub import TCCShellNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
# Actor name under which the TCC nub is registered with the hub.
name = 'tcc'
def start(poller):
    """Create the TCC shell nub (over ssh) and register it with the hub."""
    # Drop any previously-registered instance first.
    stop()

    init_commands = ('show version', 'show users', 'show time', 'show status', 'show inst/full',
                     'show object/full', 'show axisconfig', 'show focus', 'axis status', 'show scale',
                     'mir status')
    safe_commands = r'(^show )|(status$)'

    decoder = ASCIIReplyDecoder(EOL='\r', stripChars='\n', CIDfirst=False, debug=1)
    encoder = ASCIICmdEncoder(EOL='\r', debug=1, CIDfirst=False)

    # Shell transport: ssh to the TCC host using the dedicated tron key.
    ssh_argv = [
        '/usr/bin/ssh', '-1', '-e', 'none', '-a', '-x', '-i',
        os.path.expanduser('~/.ssh/tron'), '-T', 'tccuser@tcc25m'
    ]
    tcc = TCCShellNub(poller, ssh_argv,
                      initCmds=init_commands,
                      safeCmds=safe_commands,
                      needsAuth=True,
                      name=name,
                      encoder=encoder,
                      decoder=decoder,
                      logDir=os.path.join(g.logDir, name),
                      debug=1)
    hub.addActor(tcc)
def stop():
    """Remove the TCC actor from the hub, if one is registered."""
    actor = hub.findActor(name)
    if actor:
        hub.dropActor(actor)
        del actor
| 25.644444
| 97
| 0.598787
| 141
| 1,154
| 4.900709
| 0.48227
| 0.04631
| 0.047757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006857
| 0.241768
| 1,154
| 44
| 98
| 26.227273
| 0.782857
| 0
| 0
| 0
| 0
| 0
| 0.180243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.15625
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9106509f9ec5a979f79cad4305026bbe9239af41
| 9,920
|
py
|
Python
|
python/arch/api/table/session.py
|
GentleWang1011/eggroll
|
417b029958e0e0ec6f0e1eb03d9ecdf4d5cff47c
|
[
"Apache-2.0"
] | 1
|
2020-10-23T03:18:54.000Z
|
2020-10-23T03:18:54.000Z
|
python/arch/api/table/session.py
|
GentleWang1011/eggroll
|
417b029958e0e0ec6f0e1eb03d9ecdf4d5cff47c
|
[
"Apache-2.0"
] | null | null | null |
python/arch/api/table/session.py
|
GentleWang1011/eggroll
|
417b029958e0e0ec6f0e1eb03d9ecdf4d5cff47c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import datetime
import threading
from typing import Iterable
import six
from arch.api import WorkMode, Backend
from arch.api.table.table import Table
from eggroll.core.constants import StoreTypes
def build_session(job_id=None,
                  work_mode: WorkMode = WorkMode.STANDALONE,
                  backend: Backend = Backend.EGGROLL2,
                  persistent_engine: StoreTypes = StoreTypes.ROLLPAIR_LMDB):
    """Build a FateSession implementation for the requested backend.

    :param job_id: job identifier used to derive the session id (may be None)
    :param work_mode: WorkMode.STANDALONE or WorkMode.CLUSTER
    :param backend: compute backend to bind (eggroll, spark or eggroll2)
    :param persistent_engine: storage engine used for persistent tables
    :return: a backend-specific session_impl.FateSessionImpl
    :raises ValueError: if *backend* is not one of the supported kinds
    """
    from arch.api.table import eggroll_util
    if backend.is_eggroll():
        from arch.api.table.eggroll import session_impl
        eggroll_session = eggroll_util.build_eggroll_session(work_mode=work_mode, job_id=job_id)
        session = session_impl.FateSessionImpl(eggroll_session, work_mode, persistent_engine)
    elif backend.is_spark():
        from arch.api.table.pyspark import session_impl
        eggroll_session = eggroll_util.build_eggroll_session(work_mode=work_mode, job_id=job_id)
        session = session_impl.FateSessionImpl(eggroll_session, work_mode, persistent_engine)
    elif backend.is_eggroll2():
        from eggroll.core.session import session_init
        from arch.api.table.eggroll2 import session_impl
        options = {}
        if work_mode == WorkMode.STANDALONE:
            options['eggroll.session.deploy.mode'] = "standalone"
        elif work_mode == WorkMode.CLUSTER:
            options['eggroll.session.deploy.mode'] = "cluster"
        er_session = session_init(session_id=job_id, options=options)
        session = session_impl.FateSessionImpl(er_session, work_mode, persistent_engine)
    else:
        # Bug fix: this branch is reached when *backend* is unsupported
        # (the chain above dispatches on backend, not work_mode), so the
        # error must name the backend.
        raise ValueError(f"backend: {backend} not supported")
    return session
@six.add_metaclass(abc.ABCMeta)
class FateSession(object):
    """Abstract session interface plus a process-wide singleton registry.

    Concrete backends (eggroll, spark, eggroll2) implement the abstract
    methods; the static helpers below operate on the registered singleton.
    """
    # Process-wide singleton, installed once via set_instance().
    _instance: 'FateSession' = None
    # Guards singleton installation (double-checked locking below).
    __lock = threading.Lock()
    @staticmethod
    def set_instance(instance):
        """Install *instance* as the singleton; first caller wins."""
        if not FateSession._instance:
            with FateSession.__lock:
                # Re-check under the lock so only one thread installs.
                if not FateSession._instance:
                    FateSession._instance = instance
    @staticmethod
    def get_instance():
        """Return the registered singleton session (None if not yet set)."""
        return FateSession._instance
    @abc.abstractmethod
    def get_persistent_engine(self):
        """Return the storage engine used for persistent tables."""
        pass
    @abc.abstractmethod
    def table(self,
              name,
              namespace,
              partition,
              persistent,
              in_place_computing,
              create_if_missing,
              error_if_exist) -> Table:
        """Open (or create) the table identified by name/namespace."""
        pass
    @abc.abstractmethod
    def parallelize(self,
                    data: Iterable,
                    include_key,
                    name,
                    partition,
                    namespace,
                    persistent,
                    chunk_size,
                    in_place_computing,
                    create_if_missing,
                    error_if_exist) -> Table:
        """Distribute an in-memory iterable into a (possibly keyed) table."""
        pass
    @abc.abstractmethod
    def cleanup(self, name, namespace, persistent):
        """Drop tables matching *name* (may be a pattern) in *namespace*."""
        pass
    # noinspection PyPep8Naming
    @abc.abstractmethod
    def generateUniqueId(self):
        """Return a new unique id (camelCase name kept for compatibility)."""
        pass
    @abc.abstractmethod
    def get_session_id(self):
        """Return this session's id."""
        pass
    @abc.abstractmethod
    def stop(self):
        """Shut the session down and release its resources."""
        pass
    @staticmethod
    def get_data_table(name, namespace):
        """
        return data table instance by table name and table name space
        :param name: table name of data table
        :param namespace: table name space of data table
        :return:
            data table instance
        """
        return FateSession.get_instance().table(name=name,
                                                namespace=namespace,
                                                create_if_missing=False,
                                                persistent=True,
                                                error_if_exist=False,
                                                in_place_computing=False,
                                                partition=1)
    @staticmethod
    def save_data_table_meta(kv, data_table_name, data_table_namespace):
        """
        save data table meta information
        :param kv: v should be serialized by JSON
        :param data_table_name: table name of this data table
        :param data_table_namespace: table name of this data table
        :return:
        """
        from arch.api.utils.core import json_dumps
        # Meta lives in a sibling table named "<table>.meta".
        data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
                                                           namespace=data_table_namespace,
                                                           partition=1,
                                                           create_if_missing=True,
                                                           error_if_exist=False,
                                                           persistent=True,
                                                           in_place_computing=False)
        for k, v in kv.items():
            data_meta_table.put(k, json_dumps(v))
    @staticmethod
    def get_data_table_meta(key, data_table_name, data_table_namespace):
        """
        get data table meta information
        :param key: meta key to look up
        :param data_table_name: table name of this data table
        :param data_table_namespace: table name of this data table
        :return: the JSON-decoded value, or None if table/key is absent
        """
        from arch.api.utils.core import json_loads
        data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
                                                           namespace=data_table_namespace,
                                                           create_if_missing=True,
                                                           error_if_exist=False,
                                                           in_place_computing=False,
                                                           persistent=True,
                                                           partition=1)
        if data_meta_table:
            # Stored values are JSON strings; skip table-level deserialization.
            value_bytes = data_meta_table.get(key, use_serialize=False)
            if value_bytes:
                return json_loads(value_bytes)
            else:
                return None
        else:
            return None
    @staticmethod
    def get_data_table_metas(data_table_name, data_table_namespace):
        """
        get data table meta information
        :param data_table_name: table name of this data table
        :param data_table_namespace: table name of this data table
        :return: dict of all meta key/value pairs, or None if the table is absent
        """
        from arch.api.utils.core import json_loads
        data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
                                                           namespace=data_table_namespace,
                                                           partition=1,
                                                           persistent=True,
                                                           in_place_computing=False,
                                                           create_if_missing=True,
                                                           error_if_exist=False)
        if data_meta_table:
            metas = dict()
            for k, v in data_meta_table.collect(use_serialize=False):
                metas[k] = json_loads(v)
            return metas
        else:
            return None
    @staticmethod
    def clean_table(namespace, regex_string='*'):
        """Best-effort drop of non-persistent tables matching *regex_string*."""
        try:
            FateSession.get_instance().cleanup(name=regex_string, namespace=namespace, persistent=False)
        except Exception as e:
            # Cleanup is best-effort; failures are reported but not raised.
            print(e)
    @staticmethod
    def save_data(kv_data: Iterable,
                  name,
                  namespace,
                  partition=1,
                  persistent: bool = True,
                  create_if_missing=True,
                  error_if_exist=False,
                  in_version: bool = False,
                  version_log=None):
        """
        save data into data table
        :param version_log: message recorded with the version (auto-generated if falsy)
        :param in_version: whether to record a version entry for this save
        :param kv_data: iterable of key/value pairs to store
        :param name: table name of data table
        :param namespace: table namespace of data table
        :param partition: number of partition
        :param persistent: bool = True,
        :param create_if_missing:
        :param error_if_exist:
        :return:
            data table instance
        """
        from arch.api.utils import version_control
        data_table = FateSession.get_instance().table(name=name,
                                                      namespace=namespace,
                                                      partition=partition,
                                                      persistent=persistent,
                                                      in_place_computing=False,
                                                      create_if_missing=create_if_missing,
                                                      error_if_exist=error_if_exist)
        data_table.put_all(kv_data)
        if in_version:
            version_log = "[AUTO] save data at %s." % datetime.datetime.now() if not version_log else version_log
            version_control.save_version(name=name, namespace=namespace, version_log=version_log)
        return data_table
| 39.055118
| 113
| 0.541431
| 987
| 9,920
| 5.213779
| 0.192503
| 0.073455
| 0.021376
| 0.017489
| 0.425379
| 0.361251
| 0.339487
| 0.32841
| 0.29382
| 0.267198
| 0
| 0.002849
| 0.398589
| 9,920
| 253
| 114
| 39.209486
| 0.859705
| 0.164315
| 0
| 0.494118
| 0
| 0
| 0.020501
| 0.006792
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094118
| false
| 0.041176
| 0.1
| 0.005882
| 0.264706
| 0.005882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91085824641d29cf6a64bb1d7961d3c8c9b1d9df
| 10,481
|
py
|
Python
|
experiments/vitchyr/vaes/learn_swirl_vae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/vitchyr/vaes/learn_swirl_vae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/vitchyr/vaes/learn_swirl_vae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
"""
VAE on the swirl task.
Basically, VAEs don't work. It's probably because the prior isn't very good
and/or because the learning signal is pretty weak when both the encoder and
decoder change quickly. However, I tried also alternating between the two,
and that didn't seem to help.
"""
from torch.distributions import Normal
from torch.optim import Adam
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn as nn
import railrl.torch.pytorch_util as ptu
SWIRL_RATE = 1     # angular frequency of the swirl curve
T = 10             # upper bound of the latent parameter t (also a scale factor)
BS = 128           # training batch size
N_BATCHES = 2000   # gradient steps per training phase
N_VIS = 1000       # number of samples drawn for visualization
HIDDEN_SIZE = 32   # width of each hidden layer
VERBOSE = False    # if True, plot pretraining diagnostics
def swirl_data(batch_size):
    """Sample noisy (x, y) points on the swirl plus their latent t values.

    Returns (points, t) with shapes (batch_size, 2) and (batch_size, 1).
    """
    t = np.random.uniform(size=batch_size, low=0, high=T)
    clean = np.array([t * np.cos(t * SWIRL_RATE) / T,
                      t * np.sin(t * SWIRL_RATE) / T]).T
    jitter = np.random.randn(batch_size, 2) / (T * 2)
    return clean + jitter, t.reshape(-1, 1)
def swirl_t_to_data(t):
    """Map latent parameter t to its noise-free (x, y) position on the swirl."""
    coords = [t * np.cos(t * SWIRL_RATE) / T,
              t * np.sin(t * SWIRL_RATE) / T]
    return np.array(coords).T
def kl_to_prior(means, log_stds, stds):
    """
    KL between a Gaussian and a standard Gaussian.

    https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
    """
    # Element-wise closed form; terms accumulated in the same order as the
    # original expression (log std of the prior is 0, dimension d = 1).
    total = - 2 * log_stds  # log std_prior = 0
    total = total - 1       # d = 1
    total = total + stds ** 2
    total = total + means ** 2
    return 0.5 * total
class Encoder(nn.Sequential):
    """Maps 2-D samples to 1-D latents via the reparameterization trick."""

    def encode(self, x):
        """Return only the sampled latents for each row of x."""
        return self.get_encoding_and_suff_stats(x)[0]

    def get_encoding_and_suff_stats(self, x):
        """Run the network and sample latents.

        The 2-unit network output is split column-wise into a per-sample
        mean and log-std; latents are drawn as mean + std * eps with
        eps ~ N(0, 1).

        :return: (latents, means, log_stds, stds)
        """
        output = self(x)
        means, log_stds = (
            output[:, 0:1], output[:, 1:2]
        )
        stds = log_stds.exp()
        epsilon = ptu.Variable(torch.randn(*means.size()))
        latents = epsilon * stds + means
        # (removed a dead no-op `latents = latents` self-assignment)
        return latents, means, log_stds, stds
class Decoder(nn.Sequential):
    """Maps 1-D latents back to 2-D samples by sampling a diagonal Gaussian."""

    def decode(self, latents):
        """Sample reconstructed (x, y) points for the given latents."""
        raw = self(latents)
        mean_part = raw[:, 0:2]
        log_std_part = raw[:, 2:4]
        return Normal(mean_part, log_std_part.exp()).sample()
def t_to_xy(t):
    """Project latent t onto the noise-free swirl; accepts shape (N,) or (N, 1)."""
    if len(t.shape) == 2:
        # Collapse a column vector to 1-D before computing coordinates.
        t = t[:, 0]
    return np.array([t * np.cos(t * SWIRL_RATE) / T,
                     t * np.sin(t * SWIRL_RATE) / T]).T
def pretrain_encoder(encoder, opt):
    """Supervised warm-up: regress the encoder's latent onto the true t.

    Runs 1000 MSE steps on fresh swirl batches; if VERBOSE, plots the loss
    curve and the encoder's estimates projected back onto the swirl.
    """
    losses = []
    for _ in range(1000):
        x_np, y_np = swirl_data(BS)
        x = ptu.np_to_var(x_np)
        y = ptu.np_to_var(y_np)
        y_hat = encoder.encode(x)
        # MSE between the sampled latent and the ground-truth t.
        loss = ((y_hat - y) ** 2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        losses.append(loss.data.numpy())
    if VERBOSE:
        x_np, y_np = swirl_data(N_VIS)
        x = ptu.np_to_var(x_np)
        y_hat = encoder.encode(x)
        y_hat_np = y_hat.data.numpy()
        # Map estimated latents back to curve coordinates for plotting.
        x_hat_np = t_to_xy(y_hat_np[:, 0])
        plt.subplot(2, 1, 1)
        plt.plot(np.array(losses))
        plt.title("Training Loss")
        plt.subplot(2, 1, 2)
        plt.plot(x_np[:, 0], x_np[:, 1], '.')
        plt.plot(x_hat_np[:, 0], x_hat_np[:, 1], '.')
        plt.title("Samples")
        plt.legend(["Samples", "Estimates"])
        plt.show()
def train_encoder(encoder, decoder, encoder_opt):
    """One encoder-only step for the alternating scheme; returns the loss.

    NOTE(review): the ELBO and the latent "cheat" loss are both computed but
    deliberately disabled (commented out) — the step currently optimizes
    reconstruction log-prob only.
    """
    batch, true_latents = swirl_data(BS)
    batch = ptu.np_to_var(batch)
    latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
        batch
    )
    # KL is computed but unused below (ELBO variant is disabled).
    kl = kl_to_prior(means, log_stds, stds)
    latents = encoder.encode(batch)
    decoder_output = decoder(latents)
    decoder_means = decoder_output[:, 0:2]
    decoder_log_stds = decoder_output[:, 2:4]
    distribution = Normal(decoder_means, decoder_log_stds.exp())
    reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
    # elbo = - kl + reconstruction_log_prob
    # loss = - elbo.mean()
    loss = - reconstruction_log_prob.mean()
    # This is the second place where we cheat:
    latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
    loss = loss# + latent_loss
    encoder_opt.zero_grad()
    loss.backward()
    encoder_opt.step()
    return loss
def train_decoder(encoder, decoder, decoder_opt):
    """One decoder-only step: maximize reconstruction log-prob of a fresh batch."""
    batch, true_latents = swirl_data(BS)
    batch = ptu.np_to_var(batch)
    latents = encoder.encode(batch)
    raw = decoder(latents)
    # First two output columns are means, last two are log-stds.
    distribution = Normal(raw[:, 0:2], raw[:, 2:4].exp())
    reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
    loss = - reconstruction_log_prob.mean()
    decoder_opt.zero_grad()
    loss.backward()
    decoder_opt.step()
    return loss
def train_alternating(*_):
    """Train the VAE by alternating encoder-only and decoder-only phases.

    Builds fresh encoder/decoder MLPs, alternates N_BATCHES encoder steps
    with N_BATCHES decoder steps for 100 rounds, then plots losses and
    sample reconstructions. Extra positional args are ignored.
    """
    encoder = Encoder(
        nn.Linear(2, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, 2),
    )
    encoder_opt = Adam(encoder.parameters())
    decoder = Decoder(
        nn.Linear(1, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, 4),
    )
    decoder_opt = Adam(decoder.parameters())
    encoder_losses = []
    decoder_losses = []
    for _ in range(100):
        for _ in range(N_BATCHES):
            encoder_losses.append(
                train_encoder(encoder, decoder, encoder_opt).data.numpy()
            )
        for _ in range(N_BATCHES):
            decoder_losses.append(
                train_decoder(encoder, decoder, decoder_opt).data.numpy()
            )
    # Visualize
    vis_samples_np, true_latents_np = swirl_data(N_VIS)
    vis_samples = ptu.np_to_var(vis_samples_np)
    true_xy_mean_np = t_to_xy(true_latents_np)
    latents = encoder.encode(vis_samples)
    reconstructed_samples = decoder.decode(latents).data.numpy()
    # Samples decoded from prior draws (N(0, 1) latents).
    generated_samples = decoder.decode(
        ptu.Variable(torch.randn(*latents.shape))
    ).data.numpy()
    plt.subplot(2, 2, 1)
    plt.plot(np.array(encoder_losses))
    plt.title("Encoder Loss")
    plt.subplot(2, 2, 2)
    plt.plot(np.array(decoder_losses))
    plt.title("Decoder Loss")
    plt.subplot(2, 3, 4)
    plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
    plt.title("Generated Samples")
    plt.subplot(2, 3, 5)
    plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
    estimated_means = t_to_xy(latents.data.numpy())
    # plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
    plt.title("Reconstruction")
    # plt.legend(["Samples", "Projected Latents"])
    plt.subplot(2, 3, 6)
    plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
    plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
    plt.title("Original Samples")
    plt.legend(["Original", "True means"])
    plt.show()
def train():
    """Jointly train encoder and decoder on the (ELBO + latent-cheat) loss.

    Runs N_BATCHES joint gradient steps, then plots loss/KL/log-prob curves
    and generated vs. reconstructed vs. original samples.
    """
    encoder = Encoder(
        nn.Linear(2, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, 2),
    )
    encoder_opt = Adam(encoder.parameters())
    # This is the first place that we cheat. However, this pretraining isn't
    # needed if you just add the loss to the training (see below)
    # pretrain_encoder(encoder, encoder_opt)
    decoder = Decoder(
        nn.Linear(1, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
        nn.ReLU(),
        nn.Linear(HIDDEN_SIZE, 4),
    )
    decoder_opt = Adam(decoder.parameters())
    print("Done training encoder")
    losses = []
    kls = []
    log_probs = []
    for _ in range(N_BATCHES):
        batch, true_latents = swirl_data(BS)
        batch = ptu.np_to_var(batch)
        latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
            batch
        )
        kl = kl_to_prior(means, log_stds, stds)
        latents = encoder.encode(batch)
        # decoder_output = decoder(latents.detach())
        decoder_output = decoder(latents)
        decoder_means = decoder_output[:, 0:2]
        decoder_log_stds = decoder_output[:, 2:4]
        distribution = Normal(decoder_means, decoder_log_stds.exp())
        reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
        elbo = - kl + reconstruction_log_prob
        loss = - elbo.mean()
        # This is the second place where we cheat:
        latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
        loss = loss + latent_loss
        decoder_opt.zero_grad()
        encoder_opt.zero_grad()
        loss.backward()
        decoder_opt.step()
        encoder_opt.step()
        losses.append(loss.data.numpy())
        kls.append(kl.mean().data.numpy())
        log_probs.append(reconstruction_log_prob.mean().data.numpy())
    # Visualize
    vis_samples_np, true_latents_np = swirl_data(N_VIS)
    vis_samples = ptu.np_to_var(vis_samples_np)
    true_xy_mean_np = t_to_xy(true_latents_np)
    latents = encoder.encode(vis_samples)
    reconstructed_samples = decoder.decode(latents).data.numpy()
    # Samples decoded from prior draws (N(0, 1) latents).
    generated_samples = decoder.decode(
        ptu.Variable(torch.randn(*latents.shape))
    ).data.numpy()
    plt.subplot(2, 3, 1)
    plt.plot(np.array(losses))
    plt.title("Training Loss")
    plt.subplot(2, 3, 2)
    plt.plot(np.array(kls))
    plt.title("KLs")
    plt.subplot(2, 3, 3)
    plt.plot(np.array(log_probs))
    plt.title("Log Probs")
    plt.subplot(2, 3, 4)
    plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
    plt.title("Generated Samples")
    plt.subplot(2, 3, 5)
    plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
    estimated_means = t_to_xy(latents.data.numpy())
    plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
    plt.title("Reconstruction")
    plt.subplot(2, 3, 6)
    plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
    plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
    plt.title("Original Samples")
    plt.legend(["Original", "True means"])
    plt.show()
if __name__ == '__main__':
    # The alternating scheme is the variant currently enabled; joint
    # training remains available via train().
    train_alternating()
    # train()
| 30.556851
| 100
| 0.621983
| 1,461
| 10,481
| 4.239562
| 0.134839
| 0.053277
| 0.030998
| 0.04133
| 0.705037
| 0.652083
| 0.603972
| 0.589603
| 0.572812
| 0.572812
| 0
| 0.017868
| 0.241771
| 10,481
| 342
| 101
| 30.646199
| 0.761545
| 0.090736
| 0
| 0.589552
| 0
| 0
| 0.026876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.026119
| 0.003731
| 0.11194
| 0.003731
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91087a71b49d992aa86f465838203ca33ae315a2
| 893
|
py
|
Python
|
litex/build/openfpgaloader.py
|
JosephBushagour/litex
|
2b49430f2c53c4a8caa66b678af4660127b546e4
|
[
"ADSL"
] | null | null | null |
litex/build/openfpgaloader.py
|
JosephBushagour/litex
|
2b49430f2c53c4a8caa66b678af4660127b546e4
|
[
"ADSL"
] | null | null | null |
litex/build/openfpgaloader.py
|
JosephBushagour/litex
|
2b49430f2c53c4a8caa66b678af4660127b546e4
|
[
"ADSL"
] | null | null | null |
#
# This file is part of LiteX.
#
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.tools import write_to_file
from litex.build.generic_programmer import GenericProgrammer
# openFPGAloader ------------------------------------------------------------------------------------------
class OpenFPGALoader(GenericProgrammer):
    """Drive the openFPGALoader command-line tool to program or flash a board."""
    needs_bitreverse = False

    def __init__(self, board):
        self.board = board

    def load_bitstream(self, bitstream_file):
        """Load the bitstream into the FPGA's volatile configuration memory."""
        self.call(["openFPGALoader", "--board", self.board,
                   "--bitstream", bitstream_file])

    def flash(self, address, data_file):
        """Write the bitstream to flash, optionally at a byte offset."""
        command = ["openFPGALoader", "--board", self.board,
                   "--write-flash", "--bitstream", data_file]
        if address:
            command += ["--offset", address]
        self.call(command)
| 31.892857
| 107
| 0.603583
| 94
| 893
| 5.595745
| 0.510638
| 0.068441
| 0.079848
| 0.098859
| 0.13308
| 0.13308
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0.182531
| 893
| 27
| 108
| 33.074074
| 0.713699
| 0.263158
| 0
| 0.133333
| 0
| 0
| 0.130568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9108d9fb9a94a37ce1d64b6c7561be3aaeaa3b32
| 10,557
|
py
|
Python
|
NutriBuddiAPIServices/ImageClassifier/NutriBuddiClassifier/Classifier/FoodClassifier.py
|
NutriBuddi/NutriBuddi
|
b4343216cbc99b17a1faf4df50b681465418291f
|
[
"MIT"
] | 2
|
2017-12-11T03:47:14.000Z
|
2017-12-16T01:29:03.000Z
|
NutriBuddiAPIServices/ImageClassifier/NutriBuddiClassifier/Classifier/FoodClassifier.py
|
NutriBuddi/NutriBuddi
|
b4343216cbc99b17a1faf4df50b681465418291f
|
[
"MIT"
] | null | null | null |
NutriBuddiAPIServices/ImageClassifier/NutriBuddiClassifier/Classifier/FoodClassifier.py
|
NutriBuddi/NutriBuddi
|
b4343216cbc99b17a1faf4df50b681465418291f
|
[
"MIT"
] | null | null | null |
class FoodClassifier:
    """Food-image classifier: ResNet50 feature extractor + trained extension model.

    Attributes:
        resnet         - ResNet50 (no top, max pooling) producing feature vectors
        extModel       - keras extension model mapping features to class scores
        labels         - one label per output neuron of extModel
        num_classes    - number of labels
        min_confidence - predictions below this probability return ""
    """

    def __init__(self, modelpath, labels, min_confidence=0.6):
        from keras.models import load_model
        from keras.applications.resnet50 import ResNet50
        self.resnet = ResNet50(include_top=False,weights='imagenet',pooling='max',input_shape=(224,224,3))
        self.extModel = load_model(modelpath)
        if(isinstance(labels,str)):
            #its a file path: read one label per line
            from os.path import exists
            if(exists(labels)):
                # Use a context manager so the label file is closed.
                with open(labels,'r') as f:
                    self.labels = [line.split('\n')[0] for line in f.readlines()]
        else:
            self.labels = labels
        # Bug fix: count the parsed labels, not the raw argument — when
        # `labels` was a file path, len(labels) was the path's character count.
        self.num_classes = len(self.labels)
        self.min_confidence=min_confidence

    def predict(self,img):
        """Classify an image (PIL image or file path).

        Returns the best label, or "" when the path is invalid or the top
        probability falls below min_confidence.
        """
        import os
        from PIL import Image
        from keras.preprocessing.image import img_to_array
        import numpy as np
        #check if image is a filepath
        if(isinstance(img,str)):
            if(not os.path.exists(img)):
                print("Error: Invalid File Path")
                return ""
            else:
                #if its a filepath, convert to PIL image
                img = Image.open(img)
        #resize image to the model's expected input shape
        shape = self.resnet.input_shape
        imgr = img.resize(shape[1:3])
        x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
        #extract features, then classify them with the extension model
        features = self.resnet.predict(x)
        prediction = self.extModel.predict(features)
        #get max of predictions and return label(s)
        predIdx = np.argmax(prediction)
        if(prediction[0,predIdx]<self.min_confidence):
            return ""
        else:
            return self.labels[predIdx]

    def set_extModel(self,model):
        self.extModel = model

    def get_extModel(self):
        return self.extModel

    def set_labels(self,labels):
        self.labels = labels

    def get_labels(self):
        return self.labels

    def set_min_confidence(self,conf):
        self.min_confidence=conf

    def get_min_confidence(self):
        return self.min_confidence
def generate_features_from_directory(location,target_image_count,model=None):
    """Generate ResNet50 feature maps for every class directory under *location*.

    INPUT:
        location - directory containing NESTED DIRECTORIES of images (one per class)
        target_image_count - number of feature maps to generate per class
        model - optional pre-built ResNet50 (created here if None)
    OUTPUT:
        one <label>.npy file per class under *location*, holding the
        2048-dimensional feature vectors from ResNet50's convolutional layers.
        Data is generated in batches of 32.
    """
    import numpy as np
    from keras.preprocessing.image import ImageDataGenerator
    from keras.applications.resnet50 import ResNet50
    from os import listdir
    from os.path import isdir
    #create the model, if not defined
    if model==None:
        model = ResNet50(weights='imagenet',include_top=False,pooling='max')
    #create the data generation
    datagen = ImageDataGenerator()
    #for each class subdirectory of location
    if(not isdir(location)):
        print("could not find location: " + location)
        return
    for label in listdir(location):
        #first check that its a directory
        label_path = location+'/'+label
        if(not isdir(label_path)):
            continue
        #create the data generator
        #Output size is 224x224 to fit the ResNet50 input
        print("Generating feature maps for " + label + "...")
        generator = datagen.flow_from_directory(
            label_path,
            target_size = (224,224),
            batch_size = 32,
            class_mode=None)
        #use ResNet50 to create the features
        features = model.predict_generator(generator,target_image_count/32)
        #features = np.reshape(features,(features.shape[0],features.shape[3]))
        #save the features in a numpy binary
        np.save(location+'/'+label+'.npy', features)
def create_data_set(data_path,output_folder,save_to_file=True):
    """Combine all per-class .npy feature files into one dataset with labels.

    INPUTS:
        data_path - directory containing .npy files, one per class
        output_folder - where data_set.npy / label_codes.npy / labels.txt go
        save_to_file - whether to write the combined arrays to disk
    OUTPUTS:
        (X, y, label_names): stacked feature array, one-hot label array
        (rows aligned with X), and the list of label names.
    """
    from os.path import isdir
    from os import listdir
    import numpy as np
    #find out how many classes
    num_classes = 0
    label_names = []
    if(not isdir(data_path)):
        print("Could not find directory: "+ data_path)
        return
    data_contents = listdir(data_path)
    for f in data_contents:
        if(f.endswith('.npy')):
            num_classes +=1
            label_names.append(f.split('.')[0])
    if(num_classes==0):
        print("Could not find any data files in directory: "+data_path)
        return
    #generate one-hot label vectors
    labels = np.zeros([num_classes,num_classes])
    for i in range(0,num_classes):
        labels[i][i]=1
    #load all arrays into memory.
    #In the future, might need to do this on either a high ram machine
    #or find another way to concatenate data
    arrays = []
    sizes = []
    for f in data_contents:
        if(f.endswith('.npy')):
            arr = np.load(data_path+'/'+f)
            sizes.append(arr.shape[0])
            arrays.append(arr)
    X = np.vstack([arr for arr in arrays])
    #build the one-hot label rows, repeated to match each class's sample count
    labelcodes = []
    for i in range(0,num_classes):
        labelcodes.append(np.vstack([labels[i]]*sizes[i]))
    y = np.vstack([l for l in labelcodes])
    if(save_to_file):
        np.save(output_folder+'/data_set.npy',X)
        np.save(output_folder+'/label_codes.npy',y)
        with open(output_folder+"/labels.txt","w") as output:
            output.write("".join([label + '\n' for label in label_names]))
    return X,y,label_names
def train_classifier_from_images(train_dir,train_size,val_dir,val_size,output_dir):
    """Train the extension classifier (SVM-style softmax) on ResNet50 features.

    INPUTS:
        train_dir - directory containing the training images
        val_dir - directory containing the validation images
        output_dir - directory to save the trained model and labels file
        train_size - number of images to generate for each training class
        val_size - number of images to generate for each validation class
    OUTPUTS:
        A model that takes a 2048-vector of feature maps and outputs a
        prediction of what an image with those features might be. The
        labels file is also placed in output_dir. The model is a single
        dense layer with softmax activation and hinge loss (SVM-like).
    """
    from time import time
    from keras.applications.resnet50 import ResNet50
    from keras.models import Sequential
    from keras.optimizers import SGD
    from keras.regularizers import l2
    from keras.layers import Dense
    from sklearn.utils import shuffle
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    #import ResNet50 without top layer
    print("Loading the ResNet50 Network...")
    resnet = ResNet50(weights='imagenet',include_top=False,pooling='max')
    #create the training and validation datasets for each class
    print("Generating Training Set...")
    generate_features_from_directory(train_dir,train_size,model=resnet)
    print("Generating Testing Set...")
    generate_features_from_directory(val_dir,val_size,model=resnet)
    #create the combined dataset
    print("Combining datasets...")
    X_train,y_train,labels = create_data_set(train_dir,output_dir+"/train",save_to_file=True)
    X_val,y_val,labels = create_data_set(val_dir,output_dir+"/validation",save_to_file=True)
    #shuffle the train data
    X_train,y_train = shuffle(X_train,y_train)
    num_classes = len(labels)
    #create the extension model
    print("Creating extension model...")
    extModel = Sequential()
    extModel.add(Dense(num_classes,input_shape=(2048,), activation='softmax', W_regularizer=l2(0.01)))
    extModel.compile(loss='hinge',optimizer=SGD(lr=0.01,momentum=0.9),metrics=["accuracy"])
    #callbacks: checkpoint the best model by validation accuracy, stop early
    checkpoint = ModelCheckpoint(output_dir + "/extModel"+str(int(time()))+".h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
    with open(output_dir+"/labels.txt","w") as output:
        output.write("".join([label + '\n' for label in labels]))
    #train model
    print("Training...")
    extModel.fit(X_train,y_train,
                 batch_size=32,
                 epochs=50,
                 validation_data=(X_val,y_val),
                 callbacks = [checkpoint,early])
    return extModel
def add_to_train(train_dir,image,label, resnet):
    """Append the ResNet features of one image to a label's training data.

    INPUTS:
        train_dir - directory holding the per-label .npy feature files
        image - path to the image being added, or a PIL image
        label - the name of the item; features go to <train_dir>/<label>.npy
        resnet - the resnet model to be used for feature determination
    """
    from PIL import Image
    from os.path import exists
    from keras.preprocessing.image import img_to_array
    if(isinstance(image,str)):
        if(not exists(image)):
            print("Error: Invalid File Path")
            return ""
        else:
            #if its a filepath, convert to PIL image
            img = Image.open(image)
    else:
        # Bug fix: a PIL image argument previously fell through with `img`
        # unbound, raising NameError at img.resize below.
        img = image
    shape = resnet.input_shape
    imgr = img.resize(shape[1:3])
    x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
    #extract the feature vector
    features = resnet.predict(x)
    import numpy as np
    npyname = train_dir+'/'+label+'.npy'
    if(not exists(npyname)):
        np.save(npyname,features)
    else:
        #append to the existing feature set for this label
        fullset = np.load(npyname)
        newset = np.append(fullset,features,axis=0)
        np.save(npyname,newset)
| 33.728435
| 180
| 0.626883
| 1,365
| 10,557
| 4.738462
| 0.216117
| 0.016698
| 0.006184
| 0.009895
| 0.182746
| 0.148887
| 0.142239
| 0.120903
| 0.107607
| 0.097712
| 0
| 0.015212
| 0.283887
| 10,557
| 312
| 181
| 33.836538
| 0.840344
| 0.242683
| 0
| 0.25731
| 0
| 0
| 0.063076
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.157895
| 0.017544
| 0.304094
| 0.070175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9109df56e39b2986de46c0b2bc4cedc05e614932
| 5,234
|
py
|
Python
|
exchange_sockets/bitstamp_websocket.py
|
SpiralDevelopment/crypto-hft-data
|
205f01fd555eab4f636ffbb701dfcde53d27becc
|
[
"MIT"
] | 31
|
2020-07-20T14:11:39.000Z
|
2022-03-17T03:18:33.000Z
|
exchange_sockets/bitstamp_websocket.py
|
SpiralDevelopment/crypto-hft-data
|
205f01fd555eab4f636ffbb701dfcde53d27becc
|
[
"MIT"
] | null | null | null |
exchange_sockets/bitstamp_websocket.py
|
SpiralDevelopment/crypto-hft-data
|
205f01fd555eab4f636ffbb701dfcde53d27becc
|
[
"MIT"
] | 11
|
2020-07-20T14:11:52.000Z
|
2022-03-14T04:20:19.000Z
|
from exchange_sockets.exchange_websocket import ExchangeWebSocket
from singletones.custom_logger import MyLogger
import websocket
import threading
from time import sleep
from time import time
import json
import ssl
# Module-wide logger singleton.
logger = MyLogger()
class BitstampWebsocket(ExchangeWebSocket):
    def __init__(self, pairs_n_streams):
        # pairs_n_streams: mapping of pair -> comma-separated stream names.
        super().__init__('Bitstamp', pairs_n_streams)
        # Streams Bitstamp supports; anything else in the config is skipped.
        self.possible_streams = ['live_trades', 'diff_order_book']
        # Populated by init_streams() with bts:subscribe messages.
        self.streams = []
def init_streams(self):
for pair, streams in self.pairs_n_streams.items():
for sub_stream in streams.split(','):
if self.has_stream(sub_stream):
cur = dict()
cur['event'] = 'bts:subscribe'
cur['data'] = {'channel': "{}_{}".format(sub_stream, pair)}
self.streams.append(cur)
def start_multiple_websocket(self, init_streams=True):
super().start_multiple_websocket(init_streams=init_streams)
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://ws.bitstamp.net",
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE}))
self.wst.daemon = True
self.wst.start()
logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 15
while not self.ws.sock or not self.ws.sock.connected and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
logger.error("%s Couldn't connect to %s! Exiting.",
self.node,
self.exchange)
self.close_socket()
else:
logger.info('{} socket is started:\n{}\n{}'.format(self.exchange,
self.node,
str(self.streams)))
def save_trades(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
append_data = "{},{},{},{}\n".format(data['timestamp'],
data['price'],
data['amount'],
data['type'])
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_data)
def save_level2_orderbook(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
all_data = {}
data_time = data['timestamp']
for side in ['bids', 'asks']:
for cur in data[side]:
if not all_data.get(symbol, None):
all_data[symbol] = []
price = cur[0]
size = cur[1]
all_data[symbol].append("{},{},{}\n".format(
data_time,
price,
size if side == "bids" else "-{}".format(size)))
for symbol, l2_ob_data in all_data.items():
for l2_ob in l2_ob_data:
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
l2_ob)
def __on_message(self, ws, message):
if message is None:
return
try:
self.last_msg_time = int(time())
message = json.loads(message)
channel = message['channel']
if channel.startswith('diff_order_book'):
self.save_level2_orderbook(message)
elif channel.startswith('live_trades'):
self.save_trades(message)
except Exception as e:
logger.debug(str(e))
def __on_error(self, ws, error):
self.on_error = True
logger.error("On error\n{}\n{} {}".format(self.node,
self.exchange,
error))
def __on_close(self, ws):
logger.info("On close\n{}".format(self.exchange))
def __on_open(self, ws):
logger.info("On Open\n{}".format(self.exchange))
if self.streams:
for stream in self.streams:
logger.info('Subscribing to %s', json.dumps(stream))
self.ws.send(json.dumps(stream))
sleep(2)
else:
logger.error('%s. Stream is not initialized', self.exchange)
def close_socket(self):
self.exited = True
if self.ws:
self.ws.close()
| 35.364865
| 108
| 0.488154
| 526
| 5,234
| 4.652091
| 0.262357
| 0.026972
| 0.017981
| 0.023294
| 0.130772
| 0.11606
| 0.11606
| 0.11606
| 0.11606
| 0.11606
| 0
| 0.00547
| 0.40619
| 5,234
| 147
| 109
| 35.605442
| 0.781853
| 0.006496
| 0
| 0.163793
| 0
| 0
| 0.075606
| 0.00404
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.068966
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
910a76a4ae610e5e78371c5e387ad8044c415dcd
| 2,509
|
py
|
Python
|
src/data_loading.py
|
katerakelly/pytorch-maml
|
75907aca148ad053dfaf75fc138319f0d89534a8
|
[
"MIT"
] | 565
|
2017-08-29T02:02:30.000Z
|
2022-03-28T13:44:55.000Z
|
src/data_loading.py
|
lolinkun/pytorch-maml
|
75907aca148ad053dfaf75fc138319f0d89534a8
|
[
"MIT"
] | 20
|
2017-10-23T02:19:51.000Z
|
2021-06-02T07:17:28.000Z
|
src/data_loading.py
|
lolinkun/pytorch-maml
|
75907aca148ad053dfaf75fc138319f0d89534a8
|
[
"MIT"
] | 140
|
2017-09-09T09:18:15.000Z
|
2022-03-28T04:15:26.000Z
|
import numpy as np
import random
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import torchvision.transforms as transforms
from dataset import Omniglot, MNIST
'''
Helpers for loading class-balanced few-shot tasks
from datasets
'''
class ClassBalancedSampler(Sampler):
    '''
    Yields indices that visit each of 'num_cl' classes exactly once per
    batch, drawing from per-class pools of size 'num_inst'.
    Assumes dataset items are grouped by class, i.e. indices
    [j*num_inst, (j+1)*num_inst) all belong to class j.
    With 'batch_cutoff' set to None, the whole epoch's batches are
    returned; otherwise only that many (randomly chosen) batches are,
    which allows sampling with replacement across training iterations.
    '''

    def __init__(self, num_cl, num_inst, batch_cutoff=None):
        self.num_cl = num_cl
        self.num_inst = num_inst
        self.batch_cutoff = batch_cutoff

    def __iter__(self):
        '''Return a flat iterator of indices, grouped batch after batch.'''
        # One random permutation of instance slots for every class.
        per_class = [[slot + cls * self.num_inst
                      for slot in torch.randperm(self.num_inst)]
                     for cls in range(self.num_cl)]
        # Transpose: batch b takes the b-th slot from each class's pool.
        batches = [[per_class[cls][b] for cls in range(self.num_cl)]
                   for b in range(self.num_inst)]
        # Shuffle inside each batch so class order varies batch to batch.
        for batch in batches:
            random.shuffle(batch)
        if self.batch_cutoff is not None:
            random.shuffle(batches)
            batches = batches[:self.batch_cutoff]
        flat = [index for batch in batches for index in batch]
        return iter(flat)

    def __len__(self):
        return 1
def get_data_loader(task, batch_size=1, split='train'):
    '''Build a class-balanced DataLoader for the task's dataset.

    NOTE: 'batch_size' is the number of instances PER CLASS; the loader's
    real batch size is batch_size * task.num_cl.
    '''
    if task.dataset == 'mnist':
        dataset_cls = MNIST
        mean, std = [0.13066] * 3, [0.30131] * 3
    else:
        dataset_cls = Omniglot
        mean, std = [0.92206] * 3, [0.08426] * 3
    pipeline = transforms.Compose([transforms.ToTensor(),
                                   transforms.Normalize(mean=mean, std=std)])
    dset = dataset_cls(task, transform=pipeline, split=split)
    # Only training samples with replacement (bounded number of batches).
    cutoff = batch_size if split == 'train' else None
    sampler = ClassBalancedSampler(task.num_cl, task.num_inst,
                                   batch_cutoff=cutoff)
    loader = DataLoader(dset,
                        batch_size=batch_size * task.num_cl,
                        sampler=sampler,
                        num_workers=1,
                        pin_memory=True)
    return loader
| 39.825397
| 121
| 0.697489
| 358
| 2,509
| 4.765363
| 0.340782
| 0.023447
| 0.021102
| 0.024619
| 0.166471
| 0.101993
| 0.101993
| 0.078546
| 0.078546
| 0
| 0
| 0.038326
| 0.209645
| 2,509
| 62
| 122
| 40.467742
| 0.821987
| 0.232363
| 0
| 0
| 0
| 0
| 0.008301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.205882
| 0.029412
| 0.441176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
910af5706d2a9981705d65b7f790c5595e73aa3e
| 1,823
|
py
|
Python
|
DoChaP-db/UnusedScripts/main.py
|
Tal-Shay-Group/DoChaP
|
e721c6742fdff5f771bb947d92fa6cf66831939a
|
[
"MIT"
] | 2
|
2021-05-28T04:59:17.000Z
|
2021-09-03T13:25:40.000Z
|
DoChaP-db/UnusedScripts/main.py
|
Tal-Shay-Group/DoChaP
|
e721c6742fdff5f771bb947d92fa6cf66831939a
|
[
"MIT"
] | null | null | null |
DoChaP-db/UnusedScripts/main.py
|
Tal-Shay-Group/DoChaP
|
e721c6742fdff5f771bb947d92fa6cf66831939a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import os

sys.path.append(os.getcwd())
from Director import Director
from OrthologsBuilder import *
from SpeciesDB import *

if __name__ == "__main__":
    # Parse CLI arguments of the form -download=True -withEns=False.
    # Only 'download' and 'withEns' are accepted.
    inputDict = {}
    for inarg in sys.argv[1:]:
        try:
            splitArg = inarg.strip("-").split("=")
            if splitArg[0] in ("download", "withEns"):
                inputDict[splitArg[0]] = splitArg[1]
            else:
                raise ValueError("Wrong input arguments. only accepts arguments 'download' and 'withEns'")
        # BUG FIX: the original `except AttributeError or IndexError:` only
        # caught AttributeError (`or` picks the first truthy class); a tuple
        # is required to catch both.
        except (AttributeError, IndexError):
            raise ValueError("Make sure that input arguments are argumentName=argumentValue")
    species = ['M_musculus', 'H_sapiens', 'R_norvegicus', 'D_rerio', 'X_tropicalis']
    # NOTE(review): raises KeyError if either flag is omitted — the flags are
    # effectively mandatory.
    download = inputDict['download'] == 'True'
    withEns = inputDict['withEns'] == 'True'
    print("Running DBbuilder with Download {} and withENS {}".format(download, withEns))
    print(type(download))
    print(type(withEns))
    # Collect orthology data once, then build one DB per species.
    director = Director()
    orthologs = OrthologsBuilder(species=species, download=download)
    director.setBuilder(orthologs)
    director.collectFromSource(download=download)
    spl = len(species)
    spnum = 1
    for sp in species:
        print("===========Current Species: {}===========".format(sp))
        dbBuild = dbBuilder(sp, download=download, withEns=withEns)
        dbBuild.create_tables_db(merged=False)
        dbBuild.fill_in_db(merged=False)
        print("Filling {} completed!".format(dbBuild.dbName))
        # The merged (all-species) DB is created alongside the first species
        # and indexed after the last one.
        if spnum == 1:
            dbBuild.create_tables_db(merged=True)
        dbBuild.fill_in_db(merged=True)
        if spnum == spl:
            dbBuild.create_index()
            dbBuild.AddOrthology(orthologs.OrthoTable)
        spnum += 1
        print("Filling {} completed!".format(dbBuild.dbName))
| 37.979167
| 106
| 0.638508
| 196
| 1,823
| 5.826531
| 0.443878
| 0.028021
| 0.031524
| 0.036778
| 0.154116
| 0.070053
| 0
| 0
| 0
| 0
| 0
| 0.004979
| 0.228744
| 1,823
| 47
| 107
| 38.787234
| 0.807255
| 0.008777
| 0
| 0.046512
| 0
| 0
| 0.199889
| 0.014396
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.116279
| 0
| 0.116279
| 0.139535
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
910ba6c11fb3b85edca95edcb1ac441727f03f60
| 16,258
|
py
|
Python
|
TWLight/settings/base.py
|
amire80/TWLight
|
063a385ea46c61a4889ba88e3fded4183c3a6bd3
|
[
"MIT"
] | null | null | null |
TWLight/settings/base.py
|
amire80/TWLight
|
063a385ea46c61a4889ba88e3fded4183c3a6bd3
|
[
"MIT"
] | 56
|
2021-07-03T12:34:47.000Z
|
2022-03-29T12:20:08.000Z
|
TWLight/settings/base.py
|
amire80/TWLight
|
063a385ea46c61a4889ba88e3fded4183c3a6bd3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Base settings for twlight project.
This is not intended to be used as the live settings file for a project and will
not work as one. You should instead use production.py, local.py, heroku.py, or
another file that you write. These files should live in the settings directory;
start with 'from .base import *'; and proceed to add or override settings as
appropriate to their context. In particular, you will need to set ALLOWED_HOSTS
before your app will run.
If you want to use production settings, you are now done. If not, you will also
need to set the environment variables indicated in the README.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import json
from django.contrib import messages
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
# Import available locales from Faker, so we can determine what languages we fake in tests.
from faker.config import AVAILABLE_LOCALES as FAKER_AVAILABLE_LOCALES
# We're going to replace Django's default logging config.
import logging.config
# Filesystem anchors derived from this settings file's location.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TWLIGHT_HOME = os.path.dirname(
    os.path.dirname(os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir)))
)
# Deployment environment selector, read from the environment (None when unset).
TWLIGHT_ENV = os.environ.get("TWLIGHT_ENV")
# An atypical way of setting django languages for TranslateWiki integration:
# https://translatewiki.net/wiki/Thread:Support/_The_following_issue_is_unconfirmed,_still_to_be_investigated._Adding_TheWikipediaLibrary_Card_Platform_TranslateWiki
# Get the language codes from the locale directories, and compare them to the
# languages in Wikimedia CLDR. Use language autonyms from Wikimedia.
# We periodically pull:
# https://raw.githubusercontent.com/wikimedia/language-data/master/data/language-data.json
# into locale/language-data.json
def get_languages_from_locale_subdirectories(dir):
    """Return sorted, de-duplicated (lang_code, autonym) pairs for locales on disk.

    Loads language-data.json (Wikimedia language data) from *dir*, then keeps
    only the languages that also have a locale subdirectory in *dir*. The
    autonym is the last element of each language's data entry.
    """
    current_languages = []
    # FIX: use a context manager — the original left the file handle open.
    with open(os.path.join(dir, "language-data.json")) as language_data_json:
        languages = json.loads(language_data_json.read())["languages"]
    for locale_dir in os.listdir(dir):
        if os.path.isdir(os.path.join(dir, locale_dir)):
            # Direct dict lookup instead of scanning every language per
            # directory (same result, O(dirs) instead of O(dirs * langs)).
            if locale_dir in languages:
                autonym = languages[locale_dir][-1]
                current_languages += [(locale_dir, autonym)]
    return sorted(set(current_languages))
# Get the intersection of available Faker locales and the specified language set.
def get_django_faker_languages_intersection(languages):
    """Return sorted Faker locales whose code matches a Django language code.

    English locales (en, en_US, en_GB) are excluded from random test
    selection; English often works while others are broken.
    """
    languages_intersection = []
    # Set lookup replaces the original's O(locales * languages) scan with
    # an unused enumerate index; results are identical.
    django_codes = {code for code, _autonym in languages}
    for locale in FAKER_AVAILABLE_LOCALES:
        if locale in django_codes and locale not in ("en", "en_US", "en_GB"):
            languages_intersection += [locale]
    return sorted(set(languages_intersection))
# ------------------------------------------------------------------------------
# ------------------------> core django configurations <------------------------
# ------------------------------------------------------------------------------

# APP CONFIGURATION
# ------------------------------------------------------------------------------

DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.admindocs",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "whitenoise.runserver_nostatic",  # Not a django app; replaces staticfiles
    "django.contrib.staticfiles",
    "django.contrib.sites",  # required by django.contrib.comments
]

THIRD_PARTY_APPS = [
    "annoying",
    "crispy_forms",
    "reversion",
    "dal",
    "dal_select2",
    "django_comments",
    "django_cron",
    "django_filters",
    "modeltranslation",
    # DO NOT CONFUSE THIS with requests, the Python URL library! This is
    # django-request, the user analytics package.
    "request",
    "django_countries",
    "rest_framework",
    "rest_framework.authtoken",
    "django_extensions",
]

TWLIGHT_APPS = [
    "TWLight.i18n",
    "TWLight.users",
    "TWLight.resources",
    "TWLight.applications",
    "TWLight.emails",
    "TWLight.graphs",
    "TWLight.comments",
    "TWLight.api",
    "TWLight.ezproxy",
]

# dal (autocomplete_light) and modeltranslation must go before django.contrib.admin.
INSTALLED_APPS = THIRD_PARTY_APPS + DJANGO_APPS + TWLIGHT_APPS

# CRON CONFIGURATION
# ------------------------------------------------------------------------------
CRON_CLASSES = [
    "TWLight.crons.BackupCronJob",
    "TWLight.crons.SendCoordinatorRemindersCronJob",
    "TWLight.crons.UserRenewalNoticeCronJob",
    "TWLight.crons.ProxyWaitlistDisableCronJob",
    "TWLight.crons.UserUpdateEligibilityCronJob",
    "TWLight.crons.ClearSessions",
]

# REST FRAMEWORK CONFIG
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
    "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning"
}

# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
# Middleware order is significant; see the inline notes below.
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    # WhiteNoise should be loaded before everything but security.
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    # LocaleMiddleware must go after Session (and Cache, if used), but before
    # Common.
    "django.middleware.locale.LocaleMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.contrib.admindocs.middleware.XViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    # The default storage backend relies on sessions.
    # That's why SessionMiddleware must be enabled and appear before
    # MessageMiddleware.
    "django.contrib.messages.middleware.MessageMiddleware",
]

# DEBUG
# ------------------------------------------------------------------------------
# By setting this as an environment variable, it is easy to switch debug on in
# servers to do a quick test.
# DEBUG SHOULD BE FALSE ON PRODUCTION for security reasons.
DEBUG = bool(os.environ.get("DEBUG", "False").lower() == "true")

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# WMF sysadmins strongly prefer mysql, so use that.
# If you're deploying to Heroku, heroku.py will override this.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": os.environ.get("DJANGO_DB_NAME", None),
        "USER": os.environ.get("DJANGO_DB_USER", None),
        "PASSWORD": os.environ.get("DJANGO_DB_PASSWORD", None),
        "HOST": os.environ.get("DJANGO_DB_HOST", None),
        "PORT": "3306",
        # This is critical for handling Unicode data due to stupid properties
        # of MySQL; see https://stackoverflow.com/questions/2108824/mysql-incorrect-string-value-error-when-save-unicode-string-in-django .
        "OPTIONS": {
            "charset": "utf8mb4",
            "init_command": "SET sql_mode='STRICT_ALL_TABLES'; SET storage_engine='INNODB';",
        },
    }
}

# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")

# In production, this list should contain the URL of the server and nothing
# else, for security reasons. For local testing '*' is OK.
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "localhost 127.0.0.1 [::1]").split(" ")

# Let Django know about external URLs in case they differ from internal
# Needed to be added for /admin
USE_X_FORWARDED_HOST = True

REQUEST_BASE_URL = os.environ.get("REQUEST_BASE_URL", None)

ROOT_URLCONF = "TWLight.urls"

WSGI_APPLICATION = "TWLight.wsgi.application"

SITE_ID = 1

# Overwrite messages.ERROR to use danger instead, to play nice with bootstrap
MESSAGE_TAGS = {messages.ERROR: "danger"}

# INTERNATIONALIZATION CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en"  # Sets site default language.

# https://django-modeltranslation.readthedocs.io/en/latest/installation.html#advanced-settings
MODELTRANSLATION_DEFAULT_LANGUAGE = (
    LANGUAGE_CODE  # sets the modeltranslation default language.
)

LOCALE_PATHS = [
    # makemessages looks for locale/ in the top level, not the project level.
    os.path.join(os.path.dirname(BASE_DIR), "locale")
]

# We're letting the file-based translation contributions dictate the languages
# available to the system. This keeps our column and index count for db-stored
# translations as low as possible while allowing translatewiki contributions to
# be used without reconfiguring the site.
LANGUAGES = get_languages_from_locale_subdirectories(LOCALE_PATHS[0])
FAKER_LOCALES = get_django_faker_languages_intersection(LANGUAGES)

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "templates")],
        "OPTIONS": {
            # Reiterating the default so we can add to it later.
            "context_processors": (
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.request",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
            ),
            # We cache templates by default.
            "loaders": [
                (
                    "django.template.loaders.cached.Loader",
                    [
                        "django.template.loaders.filesystem.Loader",
                        "django.template.loaders.app_directories.Loader",
                    ],
                )
            ],
        },
    }
]

# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "collectedstatic")
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# MEDIA FILE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/files/
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media")
MEDIA_URL = "/media/"

# ------------------------------------------------------------------------------
# -----------------> third-party and TWLight configurations <-------------------
# ------------------------------------------------------------------------------

CRISPY_TEMPLATE_PACK = "bootstrap3"

# EZPROXY CONFIGURATION
# ------------------------------------------------------------------------------
TWLIGHT_EZPROXY_URL = os.environ.get("TWLIGHT_EZPROXY_URL", None)
TWLIGHT_EZPROXY_SECRET = os.environ.get("TWLIGHT_EZPROXY_SECRET", None)

# OAUTH CONFIGURATION
# ------------------------------------------------------------------------------
LOGIN_URL = reverse_lazy("oauth_login")
LOGIN_REDIRECT_URL = reverse_lazy("users:home")

AUTHENTICATION_BACKENDS = [
    "TWLight.users.oauth.OAuthBackend",
    "django.contrib.auth.backends.ModelBackend",
]

TWLIGHT_OAUTH_PROVIDER_URL = os.environ.get("TWLIGHT_OAUTH_PROVIDER_URL", None)

TWLIGHT_OAUTH_CONSUMER_KEY = os.environ.get("TWLIGHT_OAUTH_CONSUMER_KEY", None)
TWLIGHT_OAUTH_CONSUMER_SECRET = os.environ.get("TWLIGHT_OAUTH_CONSUMER_SECRET", None)

# API CONFIGURATION
# ------------------------------------------------------------------------------
TWLIGHT_API_PROVIDER_ENDPOINT = os.environ.get("TWLIGHT_API_PROVIDER_ENDPOINT", None)

# COMMENTS CONFIGURATION
# ------------------------------------------------------------------------------
COMMENTS_APP = "TWLight.comments"

# REVERSION CONFIGURATION
# ------------------------------------------------------------------------------
# See https://django-reversion.readthedocs.org/ .
# We are NOT using reversion middleware, because that creates revisions when
# save() is called in the context of some http requests, but not on all database
# saves. This makes it untestable. Instead we decorate the Application.save().

# DJMAIL CONFIGURATION
# ------------------------------------------------------------------------------
DJMAIL_REAL_BACKEND = os.environ.get(
    "DJANGO_EMAIL_BACKEND", "django.core.mail.backends.console.EmailBackend"
)
EMAIL_BACKEND = "djmail.backends.async.EmailBackend"
EMAIL_HOST = os.environ.get("DJANGO_EMAIL_HOST", "localhost")
# Plain SMTP on port 25; no credentials or TLS configured here.
EMAIL_PORT = 25
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = False

INSTALLED_APPS += ["djmail"]

# DJANGO_REQUEST CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE += ["request.middleware.RequestMiddleware"]

# The following are set for privacy purposes. Note that, if some amount of
# geographic tracking is desired, there is a REQUEST_ANONYMOUS_IP setting which
# scrubs the last octet of the IP address, which could be used instead of
# REQUEST_LOG_IP. There is not a way to get semi-granular user tracking (such
# as tracking only authenticated vs anonymous users).
REQUEST_LOG_IP = False
REQUEST_LOG_USER = False

# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# We're replacing the default logging config to get better control of the
# mail_admins behavior.
LOGGING_CONFIG = None
logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": False,
        "filters": {
            "require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
            "require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
        },
        "formatters": {
            "django.server": {
                "()": "django.utils.log.ServerFormatter",
                "format": "[%(server_time)s] %(message)s",
            }
        },
        "handlers": {
            # Console-only handlers, split by DEBUG state.
            "nodebug_console": {
                "level": "WARNING",
                "filters": ["require_debug_false"],
                "class": "logging.StreamHandler",
            },
            "debug_console": {
                "level": "INFO",
                "filters": ["require_debug_true"],
                "class": "logging.StreamHandler",
            },
            "django.server": {
                "level": "INFO",
                "class": "logging.StreamHandler",
                "formatter": "django.server",
            },
        },
        "loggers": {
            "django": {
                "handlers": ["nodebug_console", "debug_console"],
                "level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
            },
            "django.server": {
                "handlers": ["django.server"],
                "level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
                "propagate": False,
            },
            "TWLight": {
                "handlers": ["nodebug_console", "debug_console"],
                "level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
            },
        },
    }
)
| 35.969027
| 165
| 0.60807
| 1,690
| 16,258
| 5.701183
| 0.302367
| 0.018682
| 0.024909
| 0.016814
| 0.113129
| 0.078568
| 0.058433
| 0.058433
| 0.037571
| 0.014323
| 0
| 0.003831
| 0.181142
| 16,258
| 451
| 166
| 36.04878
| 0.719898
| 0.430373
| 0
| 0.070248
| 0
| 0
| 0.37761
| 0.230239
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008264
| false
| 0.008264
| 0.028926
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
910c31b853b8a837a994aa06e68742ed3449818b
| 19,836
|
py
|
Python
|
modelator_py/util/tla/_optable.py
|
informalsystems/modelator-py
|
d66464096c022799e680e6201590a2ead69be32d
|
[
"Apache-2.0"
] | null | null | null |
modelator_py/util/tla/_optable.py
|
informalsystems/modelator-py
|
d66464096c022799e680e6201590a2ead69be32d
|
[
"Apache-2.0"
] | 3
|
2022-03-30T16:01:49.000Z
|
2022-03-31T13:40:03.000Z
|
modelator_py/util/tla/_optable.py
|
informalsystems/modelator-py
|
d66464096c022799e680e6201590a2ead69be32d
|
[
"Apache-2.0"
] | null | null | null |
"""Table of operators."""
# Copyright 2020 by California Institute of Technology
# Copyright (c) 2008-2013 INRIA and Microsoft Corporation
# All rights reserved. Licensed under 3-clause BSD.
#
# This module is based on the file:
#
# <https://github.com/tlaplus/tlapm/blob/main/src/optable.ml>
#
import pprint
from .ast import Nodes as nodes
# open Builtin
# type fixity =
# | Nonfix
# | Prefix | Postfix
# | Infix of assoc
class Fixity:
    """Base tag for operator fixity (mirrors tlapm's `fixity` variant type)."""

    pass
class Nonfix(Fixity):
    """Ordinary (non-operator) application."""

    pass
class Prefix(Fixity):
    """Prefix operator, e.g. SUBSET or UNCHANGED."""

    pass
class Postfix(Fixity):
    """Postfix operator, e.g. prime (')."""

    pass
class Infix(Fixity):
    """Infix operator; `assoc` records its associativity (Left/Right/Non)."""

    def __init__(self, assoc):
        self.assoc = assoc
# and assoc =
# | Left | Non | Right
class Assoc:
    """Base tag for infix associativity (mirrors tlapm's `assoc` type)."""

    pass
class Left(Assoc):
    """Left-associative."""

    pass
class Right(Assoc):
    """Right-associative."""

    pass
class Non(Assoc):
    """Non-associative."""

    pass
# and dom =
# (* primitive operators *)
# | Logic | Sets | Modal
# (* user-definable operators *)
# | User
dom = {"Logic", "Sets", "Modal", "User"}
# type prec = int * int
class Prec:
    """Operator precedence as a (low, high) pair, cf. tlapm's `prec = int * int`."""

    def __init__(self, a, b):
        self.a = a
        self.b = b
# let withdef (name, prec, fix, als, defn) = (
# name, prec, fix, als, Some defn);;
def withdef(tuple_):
    """Return the (name, prec, fixity, aliases, defn) 5-tuple as a tuple.

    Mirrors tlapm's `withdef`, which wraps `defn` in `Some`; Python has no
    option type, so the entry passes through unchanged.
    """
    op_name, op_prec, op_fixity, aliases, defn = tuple_
    return (op_name, op_prec, op_fixity, aliases, defn)
# let tlaops = [
# Logic,
# List.map withdef [
# '=>', ( 1, 1), Infix(Non()), [], Implies ;
# '<=>', ( 2, 2), Infix(Non()), [ '\\equiv' ], Equiv ;
# '/\\', ( 3, 3), Infix(Left()), [ '\\land' ], Conj ;
# '\\/', ( 3, 3), Infix(Left()), [ '\\lor' ], Disj ;
# '~', ( 4, 4), Prefix, [ '\\neg' ; '\\lnot' ], Neg ;
# '=', ( 5, 5), Infix(Non()), [], Eq ;
# '#', ( 5, 5), Infix(Non()), [ '/=' ], Neq ;
# ] ;
# Sets,
# List.map withdef [
# 'SUBSET', ( 8, 8), Prefix, [], SUBSET ;
# 'UNION', ( 8, 8), Prefix, [], UNION ;
# 'DOMAIN', ( 9, 9), Prefix, [], DOMAIN ;
# '\\subseteq', ( 5, 5), Infix(Non()), [], Subseteq ;
# '\\in', ( 5, 5), Infix(Non()), [], Mem ;
# '\\notin', ( 5, 5), Infix(Non()), [], Notmem ;
# '\\', ( 8, 8), Infix(Non()), [], Setminus ;
# '\\cap', ( 8, 8), Infix(Left()), [ '\\intersect' ], Cap ;
# '\\cup', ( 8, 8), Infix(Left()), [ '\\union' ], Cup ;
# ] ;
# Sets,
# [ '\\X', (10,13), Prefix, [ '\\times' ], None ] ;
# Modal,
# List.map withdef [
# ''', (15,15), Postfix, [], Prime ;
# '~>', ( 2, 2), Infix(Non()), [ '\\leadsto' ], Leadsto ;
# 'ENABLED', ( 4,15), Prefix, [], ENABLED ;
# 'UNCHANGED', ( 4,15), Prefix, [], UNCHANGED ;
# '\\cdot', ( 5,14), Infix(Left()), [], Cdot ;
# '-+->', ( 2, 2), Infix(Non()), [], Actplus ;
# '[]', ( 4,15), Prefix, [], Box true ;
# '<>', ( 4,15), Prefix, [], Diamond ;
# ] ;
# User,
# List.map (fun (name, prec, fix, als) -> (name, prec, fix, als, None)) [
# '^', (14,14), Infix(Non()), [] ;
# '/', (13,13), Infix(Non()), [] ;
# '*', (13,13), Infix(Left()), [] ;
# '-.', (12,12), Prefix, [ '-' ] ;
# '-', (11,11), Infix(Left()), [] ;
# '+', (10,10), Infix(Left()), [] ;
# '^+', (15,15), Postfix, [] ;
# '^*', (15,15), Postfix, [] ;
# '^#', (15,15), Postfix, [] ;
# '<', ( 5, 5), Infix(Non()), [] ;
# '=<', ( 5, 5), Infix(Non()), [ '<=' ; '\\leq' ] ;
# '>', ( 5, 5), Infix(Non()), [] ;
# '>=', ( 5, 5), Infix(Non()), [ '\\geq' ] ;
# '...', ( 9, 9), Infix(Non()), [] ;
# '..', ( 9, 9), Infix(Non()), [] ;
# '|', (10,11), Infix(Left()), [] ;
# '||', (10,11), Infix(Left()), [] ;
# '&&', (13,13), Infix(Left()), [] ;
# '&', (13,13), Infix(Left()), [] ;
# '$$', ( 9,13), Infix(Left()), [] ;
# '$', ( 9,13), Infix(Left()), [] ;
# '??', ( 9,13), Infix(Left()), [] ;
# '%%', (10,11), Infix(Left()), [] ;
# '%', (10,11), Infix(Non()), [ '\\mod' ] ;
# '##', ( 9,13), Infix(Left()), [] ;
# '++', (10,10), Infix(Left()), [] ;
# '--', (11,11), Infix(Left()), [] ;
# '**', (13,13), Infix(Left()), [] ;
# '//', (13,13), Infix(Non()), [] ;
# '^^', (14,14), Infix(Non()), [] ;
# '@@', ( 6, 6), Infix(Left()), [] ;
# '!!', ( 9,13), Infix(Non()), [] ;
# '|-', ( 5, 5), Infix(Non()), [] ;
# '|=', ( 5, 5), Infix(Non()), [] ;
# '-|', ( 5, 5), Infix(Non()), [] ;
# '=|', ( 5, 5), Infix(Non()), [] ;
# '<:', ( 7, 7), Infix(Non()), [] ;
# ':>', ( 7, 7), Infix(Non()), [] ;
# ':=', ( 5, 5), Infix(Non()), [] ;
# '::=', ( 5, 5), Infix(Non()), [] ;
# '(+)', (10,10), Infix(Left()), [ '\\oplus' ] ;
# '(-)', (11,11), Infix(Left()), [ '\\ominus' ] ;
# '(.)', (13,13), Infix(Left()), [ '\\odot' ] ;
# '(/)', (13,13), Infix(Non()), [ '\\oslash' ] ;
# '(\\X)', (13,13), Infix(Left()), [ '\\otimes' ] ;
# '\\uplus', ( 9,13), Infix(Left()), [] ;
# '\\sqcap', ( 9,13), Infix(Left()), [] ;
# '\\sqcup', ( 9,13), Infix(Left()), [] ;
# '\\div', (13,13), Infix(Non()), [] ;
# '\\wr', ( 9,14), Infix(Non()), [] ;
# '\\star', (13,13), Infix(Left()), [] ;
# '\\o', (13,13), Infix(Left()), [ '\\circ' ] ;
# '\\bigcirc', (13,13), Infix(Left()), [] ;
# '\\bullet', (13,13), Infix(Left()), [] ;
# '\\prec', ( 5, 5), Infix(Non()), [] ;
# '\\succ', ( 5, 5), Infix(Non()), [] ;
# '\\preceq', ( 5, 5), Infix(Non()), [] ;
# '\\succeq', ( 5, 5), Infix(Non()), [] ;
# '\\sim', ( 5, 5), Infix(Non()), [] ;
# '\\simeq', ( 5, 5), Infix(Non()), [] ;
# '\\ll', ( 5, 5), Infix(Non()), [] ;
# '\\gg', ( 5, 5), Infix(Non()), [] ;
# '\\asymp', ( 5, 5), Infix(Non()), [] ;
# '\\subset', ( 5, 5), Infix(Non()), [] ;
# '\\supset', ( 5, 5), Infix(Non()), [] ;
# '\\supseteq', ( 5, 5), Infix(Non()), [] ;
# '\\approx', ( 5, 5), Infix(Non()), [] ;
# '\\cong', ( 5, 5), Infix(Non()), [] ;
# '\\sqsubset', ( 5, 5), Infix(Non()), [] ;
# '\\sqsubseteq', ( 5, 5), Infix(Non()), [] ;
# '\\sqsupset', ( 5, 5), Infix(Non()), [] ;
# '\\sqsupseteq', ( 5, 5), Infix(Non()), [] ;
# '\\doteq', ( 5, 5), Infix(Non()), [] ;
# '\\propto', ( 5, 5), Infix(Non()), [] ;
# ] ;
# ]
def _generate_tlaops():
    """Return the TLA+ operator table.

    The table is a list of ``(domain, entries)`` pairs, where each entry is
    ``(name, (low_prec, high_prec), fixity, aliases, builtin_node_or_None)``.
    Precedences and fixities follow the TLA+ operator reference; ``aliases``
    lists alternative spellings that resolve to the same operator.
    """
    # User-defined (non-builtin) operators are specified as 4-tuples and
    # uniformly padded with a trailing None (no builtin node).
    user_specs = [
        ("^", (14, 14), Infix(Non()), []),
        ("/", (13, 13), Infix(Non()), []),
        ("*", (13, 13), Infix(Left()), []),
        ("-.", (12, 12), Prefix(), ["-"]),
        ("-", (11, 11), Infix(Left()), []),
        ("+", (10, 10), Infix(Left()), []),
        ("^+", (15, 15), Postfix(), []),
        ("^*", (15, 15), Postfix(), []),
        ("^#", (15, 15), Postfix(), []),
        ("<", (5, 5), Infix(Non()), []),
        ("=<", (5, 5), Infix(Non()), ["<=", "\\leq"]),
        (">", (5, 5), Infix(Non()), []),
        (">=", (5, 5), Infix(Non()), ["\\geq"]),
        ("...", (9, 9), Infix(Non()), []),
        ("..", (9, 9), Infix(Non()), []),
        ("|", (10, 11), Infix(Left()), []),
        ("||", (10, 11), Infix(Left()), []),
        ("&&", (13, 13), Infix(Left()), []),
        ("&", (13, 13), Infix(Left()), []),
        ("$$", (9, 13), Infix(Left()), []),
        ("$", (9, 13), Infix(Left()), []),
        ("??", (9, 13), Infix(Left()), []),
        ("%%", (10, 11), Infix(Left()), []),
        ("%", (10, 11), Infix(Non()), ["\\mod"]),
        ("##", (9, 13), Infix(Left()), []),
        ("++", (10, 10), Infix(Left()), []),
        ("--", (11, 11), Infix(Left()), []),
        ("**", (13, 13), Infix(Left()), []),
        ("//", (13, 13), Infix(Non()), []),
        ("^^", (14, 14), Infix(Non()), []),
        ("@@", (6, 6), Infix(Left()), []),
        ("!!", (9, 13), Infix(Non()), []),
        ("|-", (5, 5), Infix(Non()), []),
        ("|=", (5, 5), Infix(Non()), []),
        ("-|", (5, 5), Infix(Non()), []),
        ("=|", (5, 5), Infix(Non()), []),
        ("<:", (7, 7), Infix(Non()), []),
        (":>", (7, 7), Infix(Non()), []),
        (":=", (5, 5), Infix(Non()), []),
        ("::=", (5, 5), Infix(Non()), []),
        ("(+)", (10, 10), Infix(Left()), ["\\oplus"]),
        ("(-)", (11, 11), Infix(Left()), ["\\ominus"]),
        ("(.)", (13, 13), Infix(Left()), ["\\odot"]),
        ("(/)", (13, 13), Infix(Non()), ["\\oslash"]),
        ("(\\X)", (13, 13), Infix(Left()), ["\\otimes"]),
        ("\\uplus", (9, 13), Infix(Left()), []),
        ("\\sqcap", (9, 13), Infix(Left()), []),
        ("\\sqcup", (9, 13), Infix(Left()), []),
        ("\\div", (13, 13), Infix(Non()), []),
        ("\\wr", (9, 14), Infix(Non()), []),
        ("\\star", (13, 13), Infix(Left()), []),
        ("\\o", (13, 13), Infix(Left()), ["\\circ"]),
        ("\\bigcirc", (13, 13), Infix(Left()), []),
        ("\\bullet", (13, 13), Infix(Left()), []),
        ("\\prec", (5, 5), Infix(Non()), []),
        ("\\succ", (5, 5), Infix(Non()), []),
        ("\\preceq", (5, 5), Infix(Non()), []),
        ("\\succeq", (5, 5), Infix(Non()), []),
        ("\\sim", (5, 5), Infix(Non()), []),
        ("\\simeq", (5, 5), Infix(Non()), []),
        ("\\ll", (5, 5), Infix(Non()), []),
        ("\\gg", (5, 5), Infix(Non()), []),
        ("\\asymp", (5, 5), Infix(Non()), []),
        ("\\subset", (5, 5), Infix(Non()), []),
        ("\\supset", (5, 5), Infix(Non()), []),
        ("\\supseteq", (5, 5), Infix(Non()), []),
        ("\\approx", (5, 5), Infix(Non()), []),
        ("\\cong", (5, 5), Infix(Non()), []),
        ("\\sqsubset", (5, 5), Infix(Non()), []),
        ("\\sqsubseteq", (5, 5), Infix(Non()), []),
        ("\\sqsupset", (5, 5), Infix(Non()), []),
        ("\\sqsupseteq", (5, 5), Infix(Non()), []),
        ("\\doteq", (5, 5), Infix(Non()), []),
        ("\\propto", (5, 5), Infix(Non()), []),
    ]
    logic_ops = [
        ("=>", (1, 1), Infix(Non()), [], nodes.Implies()),
        ("<=>", (2, 2), Infix(Non()), ["\\equiv"], nodes.Equiv()),
        ("/\\", (3, 3), Infix(Left()), ["\\land"], nodes.Conj()),
        ("\\/", (3, 3), Infix(Left()), ["\\lor"], nodes.Disj()),
        ("~", (4, 4), Prefix(), ["\\neg", "\\lnot"], nodes.Neg()),
        ("=", (5, 5), Infix(Non()), [], nodes.Eq()),
        ("#", (5, 5), Infix(Non()), ["/="], nodes.Neq()),
    ]
    set_ops = [
        ("SUBSET", (8, 8), Prefix(), [], nodes.SUBSET()),
        ("UNION", (8, 8), Prefix(), [], nodes.UNION()),
        ("DOMAIN", (9, 9), Prefix(), [], nodes.DOMAIN()),
        ("\\subseteq", (5, 5), Infix(Non()), [], nodes.Subseteq()),
        ("\\in", (5, 5), Infix(Non()), [], nodes.Mem()),
        ("\\notin", (5, 5), Infix(Non()), [], nodes.Notmem()),
        ("\\", (8, 8), Infix(Non()), ["\\setminus"], nodes.Setminus()),
        ("\\cap", (8, 8), Infix(Left()), ["\\intersect"], nodes.Cap()),
        ("\\cup", (8, 8), Infix(Left()), ["\\union"], nodes.Cup()),
        # \X has no single builtin node; it is handled structurally.
        ("\\X", (10, 13), Infix(Left()), ["\\times"], None),
    ]
    modal_ops = [
        ("'", (15, 15), Postfix(), [], nodes.Prime()),
        ("~>", (2, 2), Infix(Non()), ["\\leadsto"], nodes.LeadsTo()),
        ("ENABLED", (4, 15), Prefix(), [], nodes.ENABLED()),
        ("UNCHANGED", (4, 15), Prefix(), [], nodes.UNCHANGED()),
        ("\\cdot", (5, 14), Infix(Left()), [], nodes.Cdot()),
        ("-+->", (2, 2), Infix(Non()), [], nodes.WhilePlus()),
        ("[]", (4, 15), Prefix(), [], nodes.Box(True)),
        ("<>", (4, 15), Prefix(), [], nodes.Diamond()),
    ]
    return [
        ("Logic", logic_ops),
        ("Sets", set_ops),
        ("Modal", modal_ops),
        ("User", [(op, prec, fix, aliases, None)
                  for op, prec, fix, aliases in user_specs]),
    ]
# type tlaop = {
# name : string ;
# prec : prec ;
# fix : fixity ;
# dom : dom ;
# defn : Builtin.builtin option ;
# }
class TLAOP:
    """Description of one TLA+ operator.

    Mirrors the OCaml record ``tlaop``: an operator's lexeme, precedence
    range, fixity, domain tag, and (optionally) its builtin definition.
    """

    def __init__(self, name, prec, fixity, dom, defn):
        self.name = name    # operator lexeme (str)
        self.prec = prec    # precedence range (Prec)
        self.fix = fixity   # Fixity instance (Infix/Prefix/Postfix)
        self.dom = dom      # domain tag, e.g. "Logic", "Sets", "User"
        self.defn = defn    # builtin node, or None for user operators

    def __repr__(self):
        fields = (self.name, self.prec, self.fix, self.dom, self.defn)
        return "TLAOP({}, {}, {}, {}, {})".format(*fields)
# let optable =
# let module H = Hashtbl in
# let tab = H.create 109 in
# List.iter begin
# fun (dom, ops) ->
# List.iter begin
# fun (name, prec, fix, als, defn) ->
# let op = { name = name ;
# prec = prec ;
# fix = fix ; dom = dom ;
# defn = defn }
# in
# H.add tab name op ;
# List.iter (fun s -> H.add tab s op) als
# end ops
# end tlaops ;
# tab
def _generate_optable():
    """Build the operator lookup table.

    Mirrors the OCaml ``optable`` hashtable: every operator is registered
    under its primary name and under each of its aliases, all mapping to
    the same ``TLAOP`` instance.

    @return: dict mapping lexeme (str) -> list of TLAOP
        (a list because distinct operators may share a lexeme)
    """
    optable = dict()
    for dom, ops in _generate_tlaops():
        for name, prec, fixity, alternatives, defn in ops:
            op = TLAOP(name, prec, fixity, dom, defn)
            # Register under the primary name and every alias alike.
            for key in [name] + list(alternatives):
                optable.setdefault(key, []).append(op)
    return optable
# Module-level operator lookup table, built once at import time.
# Maps operator lexeme (and each alias) -> list of TLAOP entries.
optable = _generate_optable()
# pprint.pprint(optable)
# let nonfix name defn =
# { name = name ; prec = (-1, -1) ;
# fix = Nonfix ; dom = User ; defn = defn }
#
# let lookup name =
# if Hashtbl.mem optable name then
# Hashtbl.find optable name
# else
# nonfix name None
#
# (** Mapping from builtins to standard tlaops *)
# let standard_form b =
# match b with
# | TRUE -> nonfix 'TRUE' (Some TRUE)
# | FALSE -> nonfix 'FALSE' (Some FALSE)
# | Implies -> lookup '=>'
# | Equiv -> lookup '<=>'
# | Conj -> lookup '/\\'
# | Disj -> lookup '\\/'
# | Neg -> lookup '~'
# | Eq -> lookup '='
# | Neq -> lookup '#'
# | Divides ->
# {
# name = '?|';
# prec = (10, 11);
# fix = Infix(Non());
# dom = Logic;
# defn = Some Divides;
# }
#
# | STRING -> nonfix 'STRING' (Some STRING)
# | BOOLEAN -> nonfix 'BOOLEAN' (Some BOOLEAN)
# | SUBSET -> lookup 'SUBSET'
# | UNION -> lookup 'UNION'
# | DOMAIN -> lookup 'DOMAIN'
# | Subseteq -> lookup '\\subseteq'
# | Mem -> lookup '\\in'
# | Notmem -> lookup '\\notin'
# | Setminus -> lookup '\\'
# | Cap -> lookup '\\cap'
# | Cup -> lookup '\\cup'
#
# | Prime -> lookup '''
# | StrongPrime -> lookup '''
# | Leadsto -> lookup '~>'
# | ENABLED -> lookup 'ENABLED'
# | UNCHANGED -> lookup 'UNCHANGED'
# | Cdot -> lookup '\\cdot'
# | Actplus -> lookup '-+->'
# | Box _ -> lookup '[]'
# | Diamond -> lookup '<>'
#
# | Plus -> { (lookup '+') with defn = Some Plus }
# | Minus -> { (lookup '-') with defn = Some Minus }
# | Uminus -> { (lookup '-.') with defn = Some Uminus ; name = '-' }
# | Times -> { (lookup '*') with defn = Some Times }
# | Ratio -> { (lookup '/') with defn = Some Ratio }
# | Quotient -> { (lookup '\\div') with defn = Some Quotient }
# | Remainder -> { (lookup '%') with defn = Some Remainder }
# | Exp -> { (lookup '^') with defn = Some Exp }
# | Lteq -> { (lookup '=<') with defn = Some Lteq }
# | Lt -> { (lookup '<') with defn = Some Lt }
# | Gteq -> { (lookup '>=') with defn = Some Gteq }
# | Gt -> { (lookup '>') with defn = Some Gt }
# | Range -> { (lookup '..') with defn = Some Range }
# | Nat -> nonfix 'Nat' (Some Nat)
# | Int -> nonfix 'Int' (Some Int)
# | Real -> nonfix 'Real' (Some Real)
# | Infinity -> nonfix 'Infinity' (Some Infinity)
#
# | Seq -> nonfix 'Seq' (Some Seq)
# | Len -> nonfix 'Len' (Some Len)
# | BSeq -> nonfix 'BSeq' (Some BSeq)
# | Append -> nonfix 'Append' (Some Append)
# | Cat -> { (lookup '\\o') with defn = Some Cat }
# | Head -> nonfix 'Head' (Some Head)
# | Tail -> nonfix 'Tail' (Some Tail)
# | SubSeq -> nonfix 'SubSeq' (Some SubSeq)
# | SelectSeq -> nonfix 'SelectSeq' (Some SelectSeq)
#
# | OneArg -> { (lookup ':>') with defn = Some OneArg }
# | Extend -> { (lookup '@@') with defn = Some Extend }
# | Print -> nonfix 'Print' (Some Print)
# | PrintT -> nonfix 'PrintT' (Some PrintT)
# | Assert -> nonfix 'Assert' (Some Assert)
# | JavaTime -> nonfix 'JavaTime' (Some JavaTime)
# | TLCGet -> nonfix 'TLCGet' (Some TLCGet)
# | TLCSet -> nonfix 'TLCSet' (Some TLCSet)
# | Permutations -> nonfix 'Permutations' (Some Permutations)
# | SortSeq -> nonfix 'SortSeq' (Some SortSeq)
# | RandomElement -> nonfix 'RandomElement' (Some RandomElement)
# | Any -> nonfix 'Any' (Some Any)
# | ToString -> nonfix 'ToString' (Some ToString)
#
# | Unprimable -> nonfix 'Unprimable' None
# | Irregular -> nonfix 'Irregular' None
# ;;
| 41.497908
| 87
| 0.354658
| 1,793
| 19,836
| 3.907975
| 0.139989
| 0.122164
| 0.06993
| 0.0999
| 0.477951
| 0.26445
| 0.173969
| 0.149565
| 0.139147
| 0.107036
| 0
| 0.047663
| 0.392872
| 19,836
| 477
| 88
| 41.584906
| 0.534169
| 0.557219
| 0
| 0.089385
| 0
| 0
| 0.074971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039106
| false
| 0.044693
| 0.011173
| 0.005587
| 0.134078
| 0.005587
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
910cebe2f9c8f06e688c3bb7c05c5907ea9954d5
| 40,599
|
py
|
Python
|
DIE/UI/FunctionViewEx.py
|
a1ext/DIE
|
1a3a19f016f44cf611847ce4f0d126b136040cb6
|
[
"MIT"
] | 5
|
2017-05-17T21:53:46.000Z
|
2019-07-12T20:05:20.000Z
|
DIE/UI/FunctionViewEx.py
|
a1ext/DIE
|
1a3a19f016f44cf611847ce4f0d126b136040cb6
|
[
"MIT"
] | null | null | null |
DIE/UI/FunctionViewEx.py
|
a1ext/DIE
|
1a3a19f016f44cf611847ce4f0d126b136040cb6
|
[
"MIT"
] | 1
|
2020-03-15T21:25:14.000Z
|
2020-03-15T21:25:14.000Z
|
import networkx as nx
from awesome.context import ignored
import sark
import idaapi
import idautils
import idc
from idaapi import PluginForm
from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5
# Qt4/Qt5 compatibility aliases: the proxy-model class and a few Qt
# enum/flag constants moved between modules/namespaces across Qt versions,
# so resolve them once here and use the underscore-prefixed names below.
if use_qt5:
    _QSortFilterProxyModel = QtCore.QSortFilterProxyModel
    _MatchRecursive = QtCore.Qt.MatchRecursive
    _MatchExactly = QtCore.Qt.MatchExactly
    _PositionAtTop = QtWidgets.QAbstractItemView.PositionAtTop
else:
    _QSortFilterProxyModel = QtGui.QSortFilterProxyModel
    _MatchRecursive = QtCore.Qt.MatchFlag.MatchRecursive
    _MatchExactly = QtCore.Qt.MatchFlag.MatchExactly
    _PositionAtTop = QtWidgets.QAbstractItemView.ScrollHint.PositionAtTop
import DIE.UI.Die_Icons
import DIE.UI.ValueViewEx
import DIE.UI.ParserView
import DIE.UI.BPView
import DIE.Lib.IDAConnector
import DIE.Lib.DIEDb
import DIE.Lib.BpHandler
import sark.ui
class FunctionView(PluginForm):
"""
DIE Function View
"""
def __init__(self):
super(FunctionView, self).__init__()
self.value_view = None
self.bp_handler = None
self.die_icons = None
self.die_db = None
self.highligthed_items = []
def Show(self):
# Reset highlighted items
self.highligthed_items = []
return PluginForm.Show(self,
"Function View",
options=PluginForm.FORM_PERSIST)
    def OnCreate(self, form):
        """
        Called when the plugin form is created.
        Resolves the shared DIE singletons, then builds the widget tree:
        a QTreeView backed by a QStandardItemModel, its context menus,
        a thread-filter combo-box, and the containing toolbar/grid layout.
        @param form: the form handle passed in by IDA
        """
        # Shared singletons from the rest of the plugin.
        self.value_view = DIE.UI.ValueViewEx.get_view()
        self.bp_handler = DIE.Lib.BpHandler.get_bp_handler()
        self.die_icons = DIE.UI.Die_Icons.get_die_icons()
        self.die_db = DIE.Lib.DIEDb.get_db()
        # Get parent widget
        self.parent = form_to_widget(form)
        self.functionModel = QtGui.QStandardItemModel()
        self.functionTreeView = QtWidgets.QTreeView()
        self.functionTreeView.setExpandsOnDoubleClick(False)
        #self.functionTreeView.setSortingEnabled(True)
        # Custom delegate renders the combo-boxes for multi-valued cells.
        delegate = TreeViewDelegate(self.functionTreeView)
        self.functionTreeView.setItemDelegate(delegate)
        self.functionTreeView.doubleClicked.connect(self.itemDoubleClickSlot)
        # Populate the model before attaching it to the view.
        self._model_builder(self.functionModel)
        self.functionTreeView.setModel(self.functionModel)
        # Fixed initial widths for the 10 columns (see _make_model_headers).
        self.functionTreeView.setColumnWidth(0, 200)
        self.functionTreeView.setColumnWidth(1, 20)
        self.functionTreeView.setColumnWidth(2, 20)
        self.functionTreeView.setColumnWidth(3, 20)
        self.functionTreeView.setColumnWidth(4, 250)
        self.functionTreeView.setColumnWidth(5, 100)
        self.functionTreeView.setColumnWidth(6, 20)
        self.functionTreeView.setColumnWidth(7, 450)
        self.functionTreeView.setColumnWidth(8, 20)
        self.functionTreeView.setColumnWidth(9, 450)
        # Context menus
        self.functionTreeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.functionTreeView.customContextMenuRequested.connect(self.onCustomContextMenu)
        # Actions
        self.context_menu_param = None  # Parameter to be passed to context menu slots
        action_exclude_func = QtWidgets.QAction("Exclude Function", self.functionTreeView, triggered=lambda: self.on_exclude_func(self.context_menu_param))
        action_exclude_func_adrs = QtWidgets.QAction("Exclude All Function Calls", self.functionTreeView, triggered=lambda: self.on_exclude_func_adrs(self.context_menu_param))
        action_exclude_ea = QtWidgets.QAction("Exclude Address", self.functionTreeView, triggered=lambda: self.on_exclude_ea(self.context_menu_param))
        action_exclude_library = QtWidgets.QAction("Exclude Library", self.functionTreeView, triggered=lambda: self.on_exclude_library(self.context_menu_param))
        action_value_detail = QtWidgets.QAction("Inspect Value Details", self.functionTreeView, triggered=lambda: self.on_value_detail(self.context_menu_param))
        action_show_callgraph = QtWidgets.QAction("Show Call-Graph", self.functionTreeView, triggered=lambda: self.on_show_callgraph(self.context_menu_param))
        # Function ContextMenu (shown on function-name rows)
        self.function_context_menu = QtWidgets.QMenu(self.functionTreeView)
        self.function_context_menu.addAction(action_exclude_func)
        self.function_context_menu.addAction(action_exclude_library)
        self.function_context_menu.addAction(action_exclude_func_adrs)
        # Function ea ContextMenu (shown on call-address rows)
        self.ea_context_menu = QtWidgets.QMenu(self.functionTreeView)
        self.ea_context_menu.addAction(action_exclude_ea)
        self.ea_context_menu.addAction(action_show_callgraph)
        # Argument value ContextMenu (shown on value cells)
        self.value_context_menu = QtWidgets.QMenu(self.functionTreeView)
        self.value_context_menu.addAction(action_value_detail)
        # Thread ComboBox: filter the view to a single recorded thread.
        threads = []
        if self.die_db is not None:
            threads = self.die_db.get_thread_list()
        thread_id_list = []
        thread_id_list.append("All Threads")
        for thread in threads:
            thread_id_list.append(str(thread.thread_num))
        self.thread_id_combo = QtWidgets.QComboBox()
        self.thread_id_combo.addItems(thread_id_list)
        self.thread_id_combo.activated[str].connect(self.on_thread_combobox_change)
        self.thread_id_label = QtWidgets.QLabel("Thread: ")
        # Toolbar
        self.function_toolbar = QtWidgets.QToolBar()
        self.function_toolbar.addWidget(self.thread_id_label)
        self.function_toolbar.addWidget(self.thread_id_combo)
        # Grid
        layout = QtWidgets.QGridLayout()
        layout.addWidget(self.function_toolbar)
        layout.addWidget(self.functionTreeView)
        self.parent.setLayout(layout)
    def OnClose(self, form):
        """
        Called when the plugin form is closed.
        @param form: the form handle passed in by IDA
        """
        idaapi.msg("Closed\n")
def isVisible(self):
"""
Is functionview visible
@return: True if visible, otherwise False
"""
try:
return self.functionTreeView.isVisible()
except:
return False
    def _model_builder(self, model):
        """
        Build the function model.
        Tree layout: function (level 0) -> call address (level 1) ->
        occurrence (level 2) -> argument rows (added recursively by
        _add_model_arg_value).
        @param model: QStandardItemModel object
        """
        model.clear()  # Clear the model
        root_node = model.invisibleRootItem()
        self._make_model_headers(model)
        if self.die_db is None:
            return
        # Add db functions to the model
        for function in self.die_db.get_functions():
            item_list_func = self._make_function_item(function)
            if function.is_lib_func:  # Color library function
                for tmp_item in item_list_func:
                    tmp_item.setBackground(QtGui.QColor(184, 223, 220))
            item_function = item_list_func[0]
            root_node.appendRow(item_list_func)
            # Add function contexts ea\occurrences for the current function
            func_context_dict = self.die_db.get_function_context_dict(function)
            for function_context_ea in func_context_dict:
                function_context_list = func_context_dict[function_context_ea]
                if not len(function_context_list) > 0:
                    continue
                # All contexts in the list share the same call site; the
                # first one is representative for the level-1 row.
                item_func_context_list = self._make_function_ea_item(function_context_list[0])
                item_func_context_ea = item_func_context_list[0]
                item_function.appendRow(item_func_context_list)
                occurrence_num = 0
                for function_context in function_context_list:
                    item_func_context_list = self._make_func_occur_item(function_context, occurrence_num)
                    item_func_context = item_func_context_list[0]
                    item_func_context_ea.appendRow(item_func_context_list)
                    # Propagate the occurrence's thread id up to the
                    # function and call-site rows (used for thread filtering).
                    self._insert_thread_data(item_function, function_context.thread_id)
                    self._insert_thread_data(item_func_context_ea, function_context.thread_id)
                    # Add function arguments to each context
                    current_call_values = self.die_db.get_call_values(function_context)
                    current_ret_values = self.die_db.get_return_values(function_context)
                    curret_ret_arg_value = self.die_db.get_return_arg_value(function_context)
                    for arg_index in xrange(0, function.arg_num):
                        try:
                            current_arg = self.die_db.get_function_arg(function, arg_index)
                            self._add_model_arg_value(item_func_context,
                                                      current_call_values[arg_index],
                                                      current_ret_values[arg_index],
                                                      current_arg.name,
                                                      current_arg.type)
                        except IndexError:
                            # Fewer recorded values than declared arguments.
                            break
                    # Argument index -1 denotes the return argument.
                    ret_arg = self.die_db.get_function_arg(function, -1)
                    if ret_arg is None:
                        ret_arg_type = "VOID"
                    else:
                        ret_arg_type = ret_arg.type
                    # Add return argument
                    self._add_model_arg_value(item_func_context,
                                              None,
                                              curret_ret_arg_value,
                                              "ret_arg",
                                              ret_arg_type)
                    # Increment occurrence counter
                    occurrence_num += 1
        # Add non-executed function to the model
        # for func_ea in idautils.Functions():
        #     func_name = DIE.Lib.IDAConnector.get_function_name(func_ea)
        #
        #     if self.die_db.get_function_by_name(func_name) is None:
        #         item_list_func = self._make_nonexec_function_time(func_name)
        #
        #         if function.is_lib_func:   # Color library function
        #             for tmp_item in item_list_func:
        #                 tmp_item.setBackground(QtGui.QColor(255, 0, 0, 127))
        #
        #         root_node.appendRow(item_list_func)
def _make_model_headers(self, model):
"""
Set the model horizontal header data
@param model: the QStandardItemModel which headers should be set
"""
### Function Header
item_header = QtGui.QStandardItem("Function")
item_header.setToolTip("Function Name")
model.setHorizontalHeaderItem(0, item_header)
### Call number header
item_header = QtGui.QStandardItem("#")
item_header.setToolTip("Number of calls preformed to this function")
model.setHorizontalHeaderItem(1, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("I")
item_header.setToolTip("Indirect Call")
model.setHorizontalHeaderItem(2, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("N")
item_header.setToolTip("New Function")
model.setHorizontalHeaderItem(3, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("Type")
item_header.setToolTip("Argument Type")
model.setHorizontalHeaderItem(4, item_header)
### New Function Header
item_header = QtGui.QStandardItem("Name")
item_header.setToolTip("Argument Name")
model.setHorizontalHeaderItem(5, item_header)
### Call Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(6, item_header)
### Call Value Header
item_header = QtGui.QStandardItem("Call Value")
item_header.setToolTip("Argument`s value on function call")
model.setHorizontalHeaderItem(7, item_header)
### Return Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(8, item_header)
### Return Value Header
item_header = QtGui.QStandardItem("Return Value")
item_header.setToolTip("Argument`s value on function return")
model.setHorizontalHeaderItem(9, item_header)
def _make_thread_id_data(self, thread_id):
"""
Delimit thread_id data in order to support filtering\sorting on multi-thread data items
@param thread_id: thread id to normalize
@return: a normalized string of the thread_id to be used sa data for ThreadId_Role
"""
return "t%st" % str(thread_id)
def _insert_thread_data(self, item, thread_id):
"""
Insert thread_id data into a model item.
The value found in thread_id argument will be delimited by the _make_thread_id_data function
(e.g: thread_id 123 will become 't123t')
the delimited value will then be appended to a string of concatenated (unique) child-item thread-ids
(for example a item data value can be "a123aa5672aa11112a") for threads 123, 5672 and 111112
@param item: the model item to add the data to
@param thread_id: thread_id number
@return: True if thread data was successfully added to item, otherwise False
"""
try:
current_thread_id = self._make_thread_id_data(thread_id)
thread_data = item.data(role=DIE.UI.ThreadId_Role)
if thread_data is None:
item.setData(current_thread_id, role=DIE.UI.ThreadId_Role)
elif not current_thread_id in thread_data:
item.setData(thread_data + current_thread_id, role=DIE.UI.ThreadId_Role)
return True
except Exception as ex:
idaapi.msg("Error while inserting thread data: %s\n" %ex)
return False
def _make_function_item(self, function):
"""
Build a tree item for a function name (level-0)
@param function: dbFunction object
@return: QStandradItemModel item for the function
"""
function_txt = "%s" % function.function_name
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_txt)
item_function.setData(function, role=DIE.UI.Function_Role)
function_count = self.die_db.count_function_occurs(function)
item_function_count = QtGui.QStandardItem(str(function_count))
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function,
item_function_count,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_nonexec_function_time(self, function_name):
"""
Build a tree item for a function name (for a non-executed function)
@type: String
@param function_name: Function name
@return:
"""
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_name)
item_function_count = QtGui.QStandardItem("0")
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function, item_function_count]
return item_list
def _make_function_ea_item(self, function_context):
"""
Build a tree item for a function_ea node (level-1)
@param function_context: a dbFunction_Context object
@return: QStandradItemModel item for the function context
"""
calling_function_start = None
with ignored(sark.exceptions.SarkNoFunction):
calling_function_start = sark.Function(function_context.calling_ea).startEA
if calling_function_start is not None:
call_offset = function_context.calling_ea - calling_function_start
func_ea_txt = "%s+%s" % (function_context.calling_func_name, hex(call_offset))
else:
func_ea_txt = "[%s]:%s" % (function_context.calling_func_name, hex(function_context.calling_ea))
item_func_context_ea = QtGui.QStandardItem(func_ea_txt)
item_func_context_ea.setEditable(False)
item_func_context_ea.setData(hex(function_context.calling_ea), role=QtCore.Qt.ToolTipRole)
item_func_context_ea.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context_ea.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_is_indirect = QtGui.QStandardItem()
item_func_is_indirect.setEditable(False)
if function_context.is_indirect:
item_func_is_indirect.setIcon(self.die_icons.icon_v)
item_func_is_new = QtGui.QStandardItem()
item_func_is_new.setEditable(False)
if function_context.is_new_func:
item_func_is_new.setIcon(self.die_icons.icon_v)
item_list = [item_func_context_ea,
QtGui.QStandardItem(),
item_func_is_indirect,
item_func_is_new,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_func_occur_item(self, function_context, occur_num):
"""
Build a tree item for function occurrence (level-2)
@param function_context: a dbFunction_Context object
@param occur_num: occurrence number
@return: QStandradItemModel item for the function occurrence
"""
func_occur_txt = "Occur %s" % str(occur_num)
item_func_context = QtGui.QStandardItem(func_occur_txt)
item_func_context.setColumnCount(5)
item_func_context.setEditable(False)
item_func_context.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_context.setData(self._make_thread_id_data(function_context.thread_id), role=DIE.UI.ThreadId_Role)
item_list = [item_func_context,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
    def _add_model_arg_value(self, parent, call_value, ret_value, arg_name, arg_type, nest_depth=0):
        """
        Add one argument row (call-time and return-time values) under parent,
        then recurse into referenced values and container members.
        @param parent: parent model item the new row is appended to
        @param call_value: debug value at call time, or None
        @param ret_value: debug value at return time, or None
        @param arg_name: argument display name
        @param arg_type: argument type string
        @param nest_depth: nesting level; controls type-column indentation
        @return:
        """
        arg_count = parent.rowCount()
        this_row_item = QtGui.QStandardItem("")
        this_row_item.setData(parent.data(role=DIE.UI.ThreadId_Role), role=DIE.UI.ThreadId_Role)  # Inherit thread data from parent
        # Set indentation for argument types (for nested values)
        arg_ident = " " * nest_depth
        arg_ident_type = arg_ident + arg_type
        item_parsed_val_flag_call = QtGui.QStandardItem()
        item_parsed_val_call = QtGui.QStandardItem()
        item_parsed_val_flag_ret = QtGui.QStandardItem()
        item_parsed_val_ret = QtGui.QStandardItem()
        # Get Call Value
        if call_value is not None:
            parsed_vals = self.die_db.get_parsed_values(call_value)
            this_row_item.setData(parsed_vals, role=DIE.UI.CallValue_Role)
            if parsed_vals is not None and len(parsed_vals) > 0:
                is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
                item_parsed_val_call = QtGui.QStandardItem(best_val.data)
                if is_guessed:
                    # Question-mark icon: the displayed value is a guess.
                    item_parsed_val_flag_call.setIcon(self.die_icons.icon_question)
                if len(parsed_vals) > 1:  # If more the 1 item, show a combo-box
                    item_parsed_val_call.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
                    item_parsed_val_flag_call.setIcon(self.die_icons.icon_more)
                else:
                    item_parsed_val_call.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
            else:
                # No parsed value: fall back to raw/derived placeholders.
                parsed_val_data = "NULL"
                if call_value.derref_depth == 0:
                    parsed_val_data = "!MAX_DEREF!"
                if call_value.raw_value is not None:
                    parsed_val_data = hex(call_value.raw_value)
                if len(call_value.nested_values) > 0 or call_value.reference_flink is not None:
                    # Container/reference values get empty text; their
                    # contents are added as child rows below.
                    parsed_val_data = ""
                item_parsed_val_call = QtGui.QStandardItem(parsed_val_data)
        # Get return value
        if ret_value is not None:
            parsed_vals = self.die_db.get_parsed_values(ret_value)
            this_row_item.setData(parsed_vals, role=DIE.UI.RetValue_Role)
            # If len(parsed_vals)>1 create a combobox delegate.
            if parsed_vals:
                is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
                item_parsed_val_ret = QtGui.QStandardItem(best_val.data)
                if is_guessed:
                    # Question-mark icon: the displayed value is a guess.
                    item_parsed_val_flag_ret.setIcon(self.die_icons.icon_question)
                if len(parsed_vals) > 1:  # If more the 1 item, show a combo-box
                    item_parsed_val_ret.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
                    item_parsed_val_flag_ret.setIcon(self.die_icons.icon_more)
                else:
                    item_parsed_val_ret.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
            else:
                # No parsed value: fall back to raw/derived placeholders.
                parsed_val_data = "NULL"
                if ret_value.derref_depth == 0:
                    parsed_val_data = "!MAX_DEREF!"
                if ret_value.raw_value is not None:
                    parsed_val_data = hex(ret_value.raw_value)
                if ret_value.nested_values or ret_value.reference_flink is not None:
                    parsed_val_data = ""
                item_parsed_val_ret = QtGui.QStandardItem(parsed_val_data)
        # Place the row's cells into the 10 model columns.
        parent.setChild(arg_count, 0, this_row_item)
        parent.setChild(arg_count, 1, QtGui.QStandardItem())
        parent.setChild(arg_count, 2, QtGui.QStandardItem())
        parent.setChild(arg_count, 3, QtGui.QStandardItem())
        parent.setChild(arg_count, 4, QtGui.QStandardItem(arg_ident_type))
        parent.setChild(arg_count, 5, QtGui.QStandardItem(arg_name))
        parent.setChild(arg_count, 6, item_parsed_val_flag_call)
        parent.setChild(arg_count, 7, item_parsed_val_call)
        parent.setChild(arg_count, 8, item_parsed_val_flag_ret)
        parent.setChild(arg_count, 9, item_parsed_val_ret)
        # If current object contains reference values, add them to the module
        self._add_model_arg_ref(this_row_item, call_value, ret_value, nest_depth)
        # If current object is a container object, Add its members to the module
        self._add_model_container_members(this_row_item, call_value, ret_value, nest_depth)
def _add_model_arg_ref(self, parent, call_value, ret_value, nest_depth=0):
"""
Add a reference value to module
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call debug value is a reference
if call_value is not None:
if call_value.reference_flink is not None and not call_value.is_definitely_parsed:
ref_val_call = self.die_db.get_dbg_value(call_value.reference_flink)
ref_val_ret = None
# Try to get the same reference from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val_ret = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, ref_val_call, ref_val_ret, ref_val_call.name, ref_val_call.type, nest_depth+1)
# If return debug value is a reference (and call value is not)
elif ret_value is not None:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, None, ref_val, ref_val.name, ref_val.type, nest_depth+1)
    def _add_model_container_members(self, parent, call_value, ret_value, nest_depth=0):
        """
        Add container members (struct\union\etc members) to module
        @param parent: parent model item
        @param call_value: call-time container debug value, or None
        @param ret_value: return-time container debug value, or None
        @param nest_depth: current nesting level
        @return:
        """
        # If call value is a container type (struct\union\etc)
        if call_value is not None and call_value.nested_values is not None:
            if call_value.nested_values:
                for index in xrange(0, len(call_value.nested_values)):
                    nested_val_call = self.die_db.get_dbg_value(call_value.nested_values[index])
                    nested_val_ret = None
                    # Try to get the same member from the return debug value.
                    # NOTE(review): pairs members positionally by index and
                    # assumes ret_value.nested_values has an entry at `index`
                    # when the types match - confirm this invariant upstream.
                    if ret_value is not None and ret_value.type == call_value.type:
                        if ret_value.nested_values is not None:
                            if ret_value.nested_values:
                                nested_val_ret = self.die_db.get_dbg_value(ret_value.nested_values[index])
                    self._add_model_arg_value(parent, nested_val_call, nested_val_ret, nested_val_call.name, nested_val_call.type, nest_depth+1)
        # If return value is a container type (and call value is not)
        elif ret_value is not None:
            if ret_value.nested_values is not None:
                if ret_value.nested_values:
                    for nested_value in ret_value.nested_values:
                        nested_val_ret = self.die_db.get_dbg_value(nested_value)
                        self._add_model_arg_value(parent,
                                                  None,
                                                  nested_val_ret,
                                                  nested_val_ret.name,
                                                  nested_val_ret.type,
                                                  nest_depth+1)
def reset_function_count(self, thread_id=None):
"""
Reset the function count and set the count according to currently selected thread_id
@param thread_id: currently selected thread_id
"""
root_item = self.functionModel.item(0, 0)
rows = root_item.rowCount()
thread_id = self.thread_id_combo.currentText()
for row in xrange(0, rows):
cur_item = root_item.child(row, 0)
function = cur_item.data(role=DIE.UI.Function_Role)
if function is not None:
count = 0
if thread_id is None:
count = self.die_db.count_function_occurs(function)
else:
count = self.die_db.count_function_occurs(function, int(thread_id))
func_count_item = root_item.child(row, 1)
func_count_item.setText(str(count))
###############################################################################################
# Highlight Items.
def highlight_item(self, item):
"""
Highlight a single item
@param item: module item
"""
try:
item.setBackground(QtGui.QColor('yellow'))
cur_font = item.font()
cur_font.setBold(True)
item.setFont(cur_font)
except Exception as ex:
idaapi.msg("Error while highlighting item: %s\n" %ex)
    def highlight_item_row(self, item):
        """
        highlight the entire row containing a table item
        @param item: table item
        """
        try:
            # An invalid index means the item is not attached to the model.
            if not item.index().isValid():
                return
            parent = item.parent()
            # Top-level items have no parent; treat the item itself as the owner.
            if parent is None:
                parent = item
            # Childless parents are leaf rows: highlight just that item.
            if not parent.hasChildren():
                self.highlight_item(parent)
                return
            row = item.row()
            column_num = parent.columnCount()
            for column in xrange(0, column_num):
                if self.functionModel.hasIndex(row, column, parent.index()):
                    cur_index = self.functionModel.index(row, column, parent.index())
                    self.highlight_item(self.functionModel.itemFromIndex(cur_index))
                    # Persistent indexes survive model changes so the highlight
                    # can be undone later by clear_highlights().
                    persistent_index = QtCore.QPersistentModelIndex(cur_index)
                    self.highligthed_items.append(persistent_index)
        except Exception as ex:
            idaapi.msg("Error while highlighting item row: %s\n" % ex)
def clear_highlights(self):
"""
Clear all highlighted items
@return:
"""
try:
self.functionTreeView.collapseAll()
for persistent_index in self.highligthed_items:
if persistent_index.isValid():
item = self.functionModel.itemFromIndex(persistent_index)
item.setBackground(QtGui.QColor('white'))
cur_font = item.font()
cur_font.setBold(False)
item.setFont(cur_font)
self.highligthed_items = []
except Exception as ex:
idaapi.msg("Error while clearing highlights: %s\n" % ex)
###############################################################################################
# Find Items.
def find_function(self, function_name):
"""
Find and highlight a function in current module
@param function_name: Function name
"""
self.clear_highlights()
matched_items = self.functionModel.findItems(function_name)
for item in matched_items:
self.functionTreeView.expand(item.index())
self.functionTreeView.scrollTo(item.index(), _PositionAtTop)
self.highlight_item_row(item)
    def find_context_list(self, context_list):
        """
        Find and highlight a list of function contexts
        @param context_list: list of function contexts (of type dbFunction_Context)
        @return: True on success, False if an error occurred, None if the model is empty
        """
        try:
            self.clear_highlights()
            root_index = self.functionModel.index(0, 0)
            if not root_index.isValid():
                return
            for func_context in context_list:
                # Contexts are stored in the model keyed by their Python id().
                context_id = id(func_context)
                matched_items = self.functionModel.match(root_index, DIE.UI.ContextId_Role, context_id, -1, _MatchRecursive | _MatchExactly)
                for index in matched_items:
                    if not index.isValid():
                        continue
                    # Do not highlight "ea root" items, only occurrences of it.
                    if not index.data().startswith("Occur"):
                        continue
                    item = self.functionModel.itemFromIndex(index)
                    self.functionTreeView.expand(index)
                    self.functionTreeView.scrollTo(index, _PositionAtTop)
                    self.highlight_item_row(item)
            return True
        except Exception as ex:
            idaapi.msg("Error while looking up function context in FunctionView: %s\n" % ex)
            return False
###############################################################################################
# Slots.
# @QtCore.Slot(QtCore.QModelIndex)
def itemDoubleClickSlot(self, index):
"""
TreeView DoubleClicked Slot.
@param index: QModelIndex object of the clicked tree index item.
@return:
"""
function = index.data(role=DIE.UI.Function_Role)
if function is not None:
ea = function.function_start
if function.is_lib_func:
ea = function.proto_ea
if ea is not None and ea is not idc.BADADDR:
idc.Jump(ea)
return True
func_context = index.data(role=DIE.UI.FunctionContext_Role)
if func_context is not None:
ea = func_context.calling_ea
if ea is not None and ea is not idc.BADADDR:
idc.Jump(ea)
return True
# @QtCore.Slot(QtCore.QPoint)
def onCustomContextMenu(self, point):
index = self.functionTreeView.indexAt(point)
is_function_item = index.data(role=DIE.UI.Function_Role)
is_func_context_item = index.data(role=DIE.UI.FunctionContext_Role)
is_value_item = index.data(role=DIE.UI.ParsedValueRole)
if is_function_item is not None:
self.context_menu_param = is_function_item
self.function_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_func_context_item is not None:
self.context_menu_param = is_func_context_item
self.ea_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_value_item is not None:
self.context_menu_param = is_value_item
self.value_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
# @QtCore.Slot(str)
def on_exclude_func(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
self.bp_handler.add_bp_funcname_exception(function.function_name)
return
# @QtCore.Slot(str)
def on_exclude_func_adrs(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
func_context_list = self.die_db.get_function_context_list(function)
for func_context in func_context_list:
self.bp_handler.add_bp_ea_exception(func_context.calling_ea)
return
# @QtCore.Slot(str)
def on_exclude_ea(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
raise ValueError("Wrong value sent to 'on_exclude_ea': %s. excpected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_ea'")
self.bp_handler.add_bp_ea_exception(function_context.calling_ea)
return
# @QtCore.Slot(str)
def on_show_callgraph(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
raise ValueError("Wrong value sent to 'on_show_callgraph': %s. excpected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_show_callgraph'")
graph = nx.DiGraph()
call_graph = self.die_db.get_call_graph_to(function_context)
if not call_graph:
idaapi.msg("No Execution Graph")
return
for ctxt_node in call_graph:
(from_address, to_address) = ctxt_node
graph.add_edge(from_address, to_address)
function_name = self.die_db.get_function_name(function_context.function)
viewer = sark.ui.NXGraph(graph, "Callgraph for {}".format(function_name), handler=sark.ui.AddressNodeHandler())
viewer.Show()
return
# @QtCore.Slot(str)
def on_exclude_library(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
if function.is_lib_func and function.lib_name is not None:
self.bp_handler.add_module_exception(function.lib_name)
return
# @QtCore.Slot(str)
def on_value_detail(self, value):
if not self.value_view.isVisible():
self.value_view.Show()
self.value_view.find_value(value)
return
def on_thread_combobox_change(self, thread_id):
self.reset_function_count(thread_id) # reset function count according to currently selected thread
if thread_id == "All Threads":
if not self.functionTreeView.model() is self.functionModel:
self.functionTreeView.setModel(self.functionModel)
return
hidden_threads = ".*" + self._make_thread_id_data(thread_id) + ".*"
threadProxyModel = _QSortFilterProxyModel()
threadProxyModel.setFilterRole(DIE.UI.ThreadId_Role)
threadProxyModel.setFilterRegExp(hidden_threads)
threadProxyModel.setSourceModel(self.functionModel)
self.functionTreeView.setModel(threadProxyModel)
def on_valueview_button(self):
value_view = DIE.UI.ValueViewEx.get_view()
value_view.Show()
def on_pluginsview_button(self):
plugins_view = DIE.UI.ParserView.get_view()
plugins_view.Show()
def on_bpview_button(self):
bp_view = DIE.UI.BPView.get_view()
bp_view.Show()
###############################################################################################
# View Delegates.
class TreeViewDelegate(QtWidgets.QStyledItemDelegate):
    """
    Delegate for parsed value viewing in the tree view
    """
    def __init__(self, parent):
        QtWidgets.QStyledItemDelegate.__init__(self, parent)
        self.parent = parent
    def createEditor(self, parent, option, index):
        """
        Create a combo-box editor listing all parsed values of the cell.
        Implicitly returns None (no editor) for cells with fewer than two values.
        """
        parsed_val_list = index.data(role=DIE.UI.ParsedValuesRole)
        # Show combobox only if parsed_value has two or more items.
        if parsed_val_list is not None and len(parsed_val_list) > 1:
            lines = []
            for parsed_val in parsed_val_list:
                line_txt = "%d, %s, %s" % (parsed_val.score, parsed_val.data, parsed_val.description)
                lines.append(line_txt)
            combo_box = QtWidgets.QComboBox(parent)
            combo_box.addItems(lines)
            return combo_box
    def setEditorData(self, editor, index):
        """
        Select the combo-box row matching the cell's current integer value.
        Signals are blocked to avoid re-entrant change notifications.
        """
        editor.blockSignals(True)
        editor.setCurrentIndex(int(index.model().data(index)))
        editor.blockSignals(False)
# Singleton
function_view = None
def initialize():
    """Create the module-level FunctionView singleton."""
    global function_view
    function_view = FunctionView()
def get_view():
    """Return the FunctionView singleton (None until initialize() is called)."""
    return function_view
| 40.885196
| 176
| 0.610828
| 4,565
| 40,599
| 5.148302
| 0.10011
| 0.04289
| 0.013786
| 0.011233
| 0.48749
| 0.419539
| 0.352396
| 0.286444
| 0.243766
| 0.214748
| 0
| 0.005301
| 0.307618
| 40,599
| 992
| 177
| 40.926411
| 0.830772
| 0.122097
| 0
| 0.244556
| 0
| 0
| 0.038721
| 0.004052
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065327
| false
| 0
| 0.026801
| 0.001675
| 0.142379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
910e142fb045682f0db143a5a746598a72de10d6
| 1,103
|
py
|
Python
|
peerbot/PeerBot.py
|
danerprog/PeerHostedDiscordBot
|
310467d8f123826a20ed92174666beb46fe35d02
|
[
"Apache-2.0"
] | null | null | null |
peerbot/PeerBot.py
|
danerprog/PeerHostedDiscordBot
|
310467d8f123826a20ed92174666beb46fe35d02
|
[
"Apache-2.0"
] | null | null | null |
peerbot/PeerBot.py
|
danerprog/PeerHostedDiscordBot
|
310467d8f123826a20ed92174666beb46fe35d02
|
[
"Apache-2.0"
] | null | null | null |
from peerbot.PeerBotStateMachine import PeerBotStateMachine
from utils.Logger import Logger
import discord
class PeerBot(discord.Client):
    """Discord client that drives a PeerBotStateMachine once ready."""

    def __init__(self, args):
        self.args = args
        self.isBotReady = False
        super().__init__()

    async def on_ready(self):
        user_id = str(self.args['userId'])
        self.logger = Logger.getLogger("PeerBot - " + user_id)
        self.logger.trace("on_ready called")
        machine_args = await self._getStateMachineArgs(self.args)
        self.stateMachine = PeerBotStateMachine(machine_args)
        self.isBotReady = True
        self.stateMachine.start()

    async def on_message(self, message):
        # Ignore traffic until on_ready has finished wiring the state machine.
        if not self.isBotReady:
            return
        self.logger.trace("on_message called")
        self.stateMachine.execute(message)

    async def _getStateMachineArgs(self, args):
        user = await self.fetch_user(int(args['userId']))
        protocol_channel = await self.fetch_channel(int(args['protocolChannelId']))
        app_info = await self.application_info()
        return {
            'user': user,
            'protocolChannel': protocol_channel,
            'appInfo': app_info,
        }
| 34.46875
| 91
| 0.637353
| 110
| 1,103
| 6.236364
| 0.390909
| 0.058309
| 0.034985
| 0.049563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.260199
| 1,103
| 32
| 92
| 34.46875
| 0.840686
| 0
| 0
| 0
| 0
| 0
| 0.087862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9111af8dea9204ecc79252d0615a08b9fa56ab3b
| 4,998
|
py
|
Python
|
tests/apps/persons/test_cms_plugins_person.py
|
lunika/richie
|
b0b04d0ffc0b16f2f1b8a8201418b8f86941e45f
|
[
"MIT"
] | null | null | null |
tests/apps/persons/test_cms_plugins_person.py
|
lunika/richie
|
b0b04d0ffc0b16f2f1b8a8201418b8f86941e45f
|
[
"MIT"
] | null | null | null |
tests/apps/persons/test_cms_plugins_person.py
|
lunika/richie
|
b0b04d0ffc0b16f2f1b8a8201418b8f86941e45f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Unit tests for the Person plugin and its model
"""
from django import forms
from django.conf import settings
from django.test import TestCase
from cms.api import add_plugin, create_page
from cmsplugin_plain_text.cms_plugins import PlaintextPlugin
from djangocms_picture.cms_plugins import PicturePlugin
from richie.apps.core.factories import FilerImageFactory, UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.persons.cms_plugins import PersonPlugin
from richie.apps.persons.factories import PersonFactory
from richie.apps.persons.models import PersonPluginModel
class PersonPluginTestCase(TestCase):
    """
    Test that PersonPlugin correctly displays a Person's page placeholders content
    """
    def test_cms_plugins_person_form_page_choices(self):
        """
        The form to create a person plugin should only list person pages in the select box.
        """
        class PersonPluginModelForm(forms.ModelForm):
            """A form for testing the choices in the select box"""
            class Meta:
                model = PersonPluginModel
                exclude = ()
        # One person page plus one unrelated page: only the person may be listed.
        person = PersonFactory()
        other_page_title = "other page"
        create_page(other_page_title, "richie/fullwidth.html", settings.LANGUAGE_CODE)
        plugin_form = PersonPluginModelForm()
        self.assertIn(person.get_full_name(), plugin_form.as_table())
        self.assertNotIn(other_page_title, plugin_form.as_table())
    def test_cms_plugins_person_render(self):
        """
        Test that a PersonPlugin correctly renders person's page specific information
        """
        # Create a filer fake image
        staff = UserFactory(is_staff=True, is_superuser=True)
        image = FilerImageFactory(owner=staff)
        # Create a Person
        person = PersonFactory()
        person_page = person.extended_object
        # Add portrait to related placeholder
        portrait_placeholder = person_page.placeholders.get(slot="portrait")
        add_plugin(
            portrait_placeholder,
            PicturePlugin,
            "en",
            **{"picture": image, "attributes": {"alt": "portrait description"}}
        )
        add_plugin(
            portrait_placeholder,
            PicturePlugin,
            "fr",
            **{"picture": image, "attributes": {"alt": "description du portrait"}}
        )
        # A resume to related placeholder
        resume_placeholder = person_page.placeholders.get(slot="resume")
        add_plugin(
            resume_placeholder, PlaintextPlugin, "en", **{"body": "A short resume"}
        )
        add_plugin(
            resume_placeholder, PlaintextPlugin, "fr", **{"body": "Un résumé court"}
        )
        # Create a page to add the plugin to
        page = create_i18n_page({"en": "A page", "fr": "Une page"})
        placeholder = page.placeholders.get(slot="maincontent")
        add_plugin(placeholder, PersonPlugin, "en", **{"person": person})
        add_plugin(placeholder, PersonPlugin, "fr", **{"person": person})
        page.publish("en")
        page.publish("fr")
        # Check the page content in English
        url = page.get_absolute_url(language="en")
        response = self.client.get(url)
        # Person's name should be present as a link to the cms page
        # And CMS page title should be in title attribute of the link
        self.assertContains(
            response,
            '<a href="{url}" title="{page_title}">'.format(
                url=person_page.get_absolute_url(), page_title=person_page.get_title()
            ),
            status_code=200,
        )
        self.assertContains(response, person.get_full_name(), html=True)
        # Person's portrait and its properties should be present
        # pylint: disable=no-member
        self.assertContains(response, image.file.name)
        # Short resume should be present
        self.assertContains(
            response,
            '<div class="person-plugin__content__text">A short resume</div>',
            html=True,
        )
        # The person's full name should be wrapped in a h2
        self.assertContains(
            response,
            '<h2 class="person-plugin__content__title">{:s}</h2>'.format(
                person.get_full_name()
            ),
            html=True,
        )
        # Same checks in French
        url = page.get_absolute_url(language="fr")
        response = self.client.get(url)
        self.assertContains(
            response,
            '<a href="{url}" title="{page_title}">'.format(
                url=person_page.get_absolute_url(), page_title=person_page.get_title()
            ),
            status_code=200,
        )
        # pylint: disable=no-member
        self.assertContains(response, image.file.name)
        self.assertContains(
            response,
            '<div class="person-plugin__content__text">Un résumé court</div>',
            html=True,
        )
| 36.75
| 91
| 0.62585
| 557
| 4,998
| 5.452424
| 0.262118
| 0.023708
| 0.068489
| 0.023708
| 0.320053
| 0.249588
| 0.156734
| 0.156734
| 0.156734
| 0.119197
| 0
| 0.003871
| 0.276311
| 4,998
| 135
| 92
| 37.022222
| 0.835776
| 0.173069
| 0
| 0.391304
| 0
| 0
| 0.117676
| 0.04623
| 0
| 0
| 0
| 0
| 0.108696
| 1
| 0.021739
| false
| 0
| 0.119565
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91124d593f9dcda3e366e95378c8d482f7f013ee
| 9,295
|
py
|
Python
|
mathics/core/subexpression.py
|
Mathics3/mathics-core
|
54dc3c00a42cd893c6430054e125291b6eb55ead
|
[
"Apache-2.0"
] | 90
|
2021-09-11T14:14:00.000Z
|
2022-03-29T02:08:29.000Z
|
mathics/core/subexpression.py
|
Mathics3/mathics-core
|
54dc3c00a42cd893c6430054e125291b6eb55ead
|
[
"Apache-2.0"
] | 187
|
2021-09-13T01:00:41.000Z
|
2022-03-31T11:52:52.000Z
|
mathics/core/subexpression.py
|
Mathics3/mathics-core
|
54dc3c00a42cd893c6430054e125291b6eb55ead
|
[
"Apache-2.0"
] | 10
|
2021-10-05T15:44:26.000Z
|
2022-03-21T12:34:33.000Z
|
# cython: language_level=3
# -*- coding: utf-8 -*-
from mathics.core.expression import Expression
from mathics.core.symbols import Atom, Symbol
from mathics.core.atoms import Integer
from mathics.builtin.base import MessageException
"""
This module provides some infrastructure to deal with SubExpressions.
"""
def _pspec_span_to_tuple(pspec, expr):
"""
This function takes an expression and a Mathics
`Span` Expression and returns a tuple with the positions
of the leaves.
"""
start = 1
stop = None
step = 1
leaves = pspec.leaves
if len(leaves) > 3:
raise MessageException("Part", "span", leaves)
if len(leaves) > 0:
start = leaves[0].get_int_value()
if len(leaves) > 1:
stop = leaves[1].get_int_value()
if stop is None:
if leaves[1].get_name() == "System`All":
stop = None
else:
raise MessageException("Part", "span", pspec)
else:
stop = stop - 1 if stop > 0 else len(expr.leaves) + stop
if len(pspec.leaves) > 2:
step = leaves[2].get_int_value()
if start is None or step is None:
raise MessageException("Part", "span", pspec)
if start == 0 or stop == 0:
# index 0 is undefined
raise MessageException("Part", "span", Integer(0))
if start < 0:
start = len(expr.leaves) - start
else:
start = start - 1
if stop is None:
stop = 0 if step < 0 else len(expr.leaves) - 1
stop = stop + 1 if step > 0 else stop - 1
return tuple(k for k in range(start, stop, step))
class ExpressionPointer(object):
    """
    This class represents a reference to a leaf in an expression.
    Supports a minimal part of the basic interface of `mathics.core.symbols.BaseElement`.
    """

    def __init__(self, expr, pos=None):
        """
        Initializes a ExpressionPointer pointing to the leaf in position `pos`
        of `expr`.
        expr: can be an Expression, a Symbol, or another ExpressionPointer
        pos: int or None
        If `pos==0`, then the pointer points to the `head` of the expression.
        If `pos` is `None`, it points out the whole expression.
        """
        if pos is None:
            # Collapse a pointer-to-pointer into a direct pointer.
            if type(expr) is ExpressionPointer:
                self.parent = expr.parent
                self.position = expr.position
            else:
                self.parent = expr
                self.position = None
        else:
            self.parent = expr
            self.position = pos

    def __str__(self) -> str:
        return "%s[[%s]]" % (self.parent, self.position)

    def __repr__(self) -> str:
        return self.__str__()

    @property
    def original(self):
        # Pointers never carry an "original" expression.
        return None

    @original.setter
    def original(self, value):
        raise ValueError("Expression.original is write protected.")

    @property
    def head(self):
        """Head of the pointed-to element."""
        pos = self.position
        if pos is None:
            return self.parent.head
        elif pos == 0:
            return self.parent.head.head
        return self.parent.leaves[pos - 1].head

    @head.setter
    def head(self, value):
        raise ValueError("ExpressionPointer.head is write protected.")

    @property
    def leaves(self):
        """Leaves of the pointed-to element."""
        pos = self.position
        if pos is None:
            return self.parent.leaves
        elif pos == 0:
            # BUG FIX: the value was computed but never returned, so the
            # pos == 0 case fell through to the generic lookup below.
            return self.parent.head.leaves
        return self.parent.leaves[pos - 1].leaves

    @leaves.setter
    def leaves(self, value):
        raise ValueError("ExpressionPointer.leaves is write protected.")

    def get_head_name(self):
        return self.head.get_name()

    def is_atom(self):
        """True if the pointed-to element is an atom."""
        pos = self.position
        if pos is None:
            return self.parent.is_atom()
        elif pos == 0:
            return self.parent.head.is_atom()
        return self.parent.leaves[pos - 1].is_atom()

    def to_expression(self):
        """Materialize the pointed-to element as an (copied) expression."""
        parent = self.parent
        p = self.position
        if p == 0:
            if isinstance(parent, Symbol):
                return parent
            else:
                return parent.head.copy()
        else:
            leaf = self.parent.leaves[p - 1]
            if isinstance(leaf, Atom):
                return leaf
            else:
                return leaf.copy()

    def replace(self, new):
        """
        This method replaces the value pointed out by a `new` value.
        """
        # First, look for the ancestor that is not an ExpressionPointer,
        # keeping the positions of each step:
        parent = self.parent
        pos = [self.position]
        while type(parent) is ExpressionPointer:
            position = parent.position
            if position is None:
                parent = parent.parent
                continue
            pos.append(parent.position)
            parent = parent.parent
        # At this point, we hit the expression, and we have
        # the path to reach the position
        i = pos.pop()
        try:
            while pos:
                if i == 0:
                    parent = parent._head
                else:
                    parent = parent.elements[i - 1]
                i = pos.pop()
        except Exception:
            raise MessageException("Part", "span", pos)
        # Now, we have a pointer to an element in a true `Expression`.
        # Now, set it to the new value.
        if i == 0:
            parent.set_head(new)
        else:
            parent.set_element(i - 1, new)
class SubExpression(object):
    """
    This class represents a Subexpression of an existing Expression.
    Assignment to a subexpression results in the change of the original Expression.
    """
    def __new__(cls, expr, pos=None):
        """
        `expr` can be an `Expression`, a `ExpressionPointer` or
        another `SubExpression`
        `pos` can be `None`, an integer value or an `Expression` that
        indicates a subset of leaves in the original `Expression`.
        If `pos` points out to a single whole leaf of `expr`, then
        returns an `ExpressionPointer`.
        """
        # If pos is a list, take the first element, and
        # store the remainder.
        if type(pos) in (tuple, list):
            pos, rem_pos = pos[0], pos[1:]
            if len(rem_pos) == 0:
                rem_pos = None
        else:
            rem_pos = None
        # Trivial conversion: if pos is an `Integer`, convert
        # to a Python native int
        if type(pos) is Integer:
            pos = pos.get_int_value()
        # pos == `System`All`
        elif isinstance(pos, Symbol) and pos.get_name() == "System`All":
            pos = None
        elif type(pos) is Expression:
            if pos.has_form("System`List", None):
                tuple_pos = [i.get_int_value() for i in pos.leaves]
                if any([i is None for i in tuple_pos]):
                    raise MessageException("Part", "pspec", pos)
                pos = tuple_pos
            elif pos.has_form("System`Span", None):
                pos = _pspec_span_to_tuple(pos, expr)
            else:
                raise MessageException("Part", "pspec", pos)
        # A single (or whole) position degenerates to a plain pointer,
        # recursing into SubExpression only when more positions remain.
        if pos is None or type(pos) is int:
            if rem_pos is None:
                return ExpressionPointer(expr, pos)
            else:
                return SubExpression(ExpressionPointer(expr, pos), rem_pos)
        elif type(pos) is tuple:
            # A tuple of positions selects several leaves: build a real
            # SubExpression with one pointer/subexpression per position.
            self = super(SubExpression, cls).__new__(cls)
            self._headp = ExpressionPointer(expr.head, 0)
            self._elementsp = [
                SubExpression(ExpressionPointer(expr, k + 1), rem_pos) for k in pos
            ]
            return self
    def is_atom(self):
        return False
    def __str__(self):
        return (
            self.head.__str__()
            + "[\n"
            + ",\n".join(["\t " + leaf.__str__() for leaf in self.leaves])
            + "\n\t]"
        )
    def __repr__(self):
        return self.__str__()
    @property
    def head(self):
        return self._headp
    @head.setter
    def head(self, value):
        raise ValueError("SubExpression.head is write protected.")
    def get_head_name(self):
        return self._headp.parent.get_head_name()
    @property
    def elements(self):
        return self._elementsp
    @elements.setter
    def elements(self, value):
        raise ValueError("SubExpression.leaves is write protected.")
    @property
    def leaves(self):
        # `leaves` is an alias of `elements`.
        return self._elementsp
    @leaves.setter
    def leaves(self, value):
        raise ValueError("SubExpression.leaves is write protected.")
    def to_expression(self):
        return Expression(
            self._headp.to_expression(),
            *(leaf.to_expression() for leaf in self._elementsp)
        )
    def replace(self, new):
        """
        Asigns `new` to the subexpression, according to the logic of `mathics.core.walk_parts`
        """
        # A List of matching length is distributed element-wise; any other
        # value is assigned to every selected element.
        if (new.has_form("List", None) or new.get_head_name() == "System`List") and len(
            new.leaves
        ) == len(self._elementsp):
            for leaf, sub_new in zip(self._elementsp, new.leaves):
                leaf.replace(sub_new)
        else:
            for leaf in self._elementsp:
                leaf.replace(new)
| 30.276873
| 94
| 0.563636
| 1,131
| 9,295
| 4.532272
| 0.16092
| 0.033164
| 0.010925
| 0.012876
| 0.238588
| 0.152946
| 0.117441
| 0.095981
| 0.067109
| 0.044089
| 0
| 0.007174
| 0.340183
| 9,295
| 306
| 95
| 30.375817
| 0.828632
| 0.1773
| 0
| 0.368932
| 0
| 0
| 0.051905
| 0.006283
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131068
| false
| 0
| 0.019417
| 0.058252
| 0.296117
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9112d9a09ef3e419ea9c838421fb6d27323a5f4c
| 1,960
|
py
|
Python
|
lib/python/treadmill/tests/api/cell_test.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 1
|
2019-04-14T20:17:07.000Z
|
2019-04-14T20:17:07.000Z
|
lib/python/treadmill/tests/api/cell_test.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 1
|
2017-09-18T10:36:12.000Z
|
2017-09-18T10:36:12.000Z
|
lib/python/treadmill/tests/api/cell_test.py
|
evreng/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | null | null | null |
"""Cell API tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
from treadmill import admin
from treadmill.api import cell
class ApiCellTest(unittest.TestCase):
    """treadmill.api.cell tests."""
    def setUp(self):
        self.cell = cell.API()
    def tearDown(self):
        pass
    @mock.patch('treadmill.context.AdminContext.conn',
                mock.Mock(return_value=admin.Admin(None, None)))
    @mock.patch('treadmill.admin.Cell.list', mock.Mock(return_value=[]))
    def test_list(self):
        """Dummy test for treadmill.api.cell._list()"""
        self.cell.list()
        # admin.Cell is patched class-wide, so a fresh instance exposes the
        # same mocked `list` used by the API call above.
        cell_admin = admin.Cell(None)
        self.assertTrue(cell_admin.list.called)
    @mock.patch('treadmill.context.AdminContext.conn',
                mock.Mock(return_value=admin.Admin(None, None)))
    @mock.patch('treadmill.admin.Cell.get',
                mock.Mock(return_value={'cell': 'ny-999-cell'}))
    def test_get(self):
        """Dummy test for treadmill.api.cell.get()"""
        cell_admin = admin.Cell(None)
        self.cell.get('some-cell')
        cell_admin.get.assert_called_with('some-cell')
    @mock.patch('treadmill.context.AdminContext.conn',
                mock.Mock(return_value=admin.Admin(None, None)))
    @mock.patch('treadmill.admin.Cell.get',
                mock.Mock(return_value={'cell': 'ny-999-cell'}))
    @mock.patch('treadmill.admin.Cell.create', mock.Mock())
    def test_create(self):
        """Dummy test for treadmill.api.cell.create()"""
        cell_admin = admin.Cell(None)
        self.cell.create('some-cell', {'location': 'ny',
                                       'treadmillid': 'treadmld',
                                       'version': 'v3'})
        # NOTE(review): this verifies the `get(..., dirty=True)` readback after
        # create rather than the `create` call itself — presumably intentional,
        # but worth confirming against the API implementation.
        cell_admin.get.assert_called_with('some-cell', dirty=True)
# Allow running this test module directly: `python cell_test.py`.
if __name__ == '__main__':
    unittest.main()
| 32.131148
| 72
| 0.628061
| 238
| 1,960
| 4.97479
| 0.226891
| 0.053209
| 0.106419
| 0.096284
| 0.5625
| 0.539696
| 0.517736
| 0.38598
| 0.325169
| 0.325169
| 0
| 0.004627
| 0.228061
| 1,960
| 60
| 73
| 32.666667
| 0.777925
| 0.084694
| 0
| 0.317073
| 0
| 0
| 0.1794
| 0.116016
| 0
| 0
| 0
| 0
| 0.073171
| 1
| 0.121951
| false
| 0.02439
| 0.195122
| 0
| 0.341463
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9116cf95f3505891c20808a9297cb4047c9dcb7a
| 776
|
py
|
Python
|
sandbox/pdp2/arbitrary_data/zip_files.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | 3
|
2021-04-17T10:20:26.000Z
|
2022-03-08T07:36:13.000Z
|
sandbox/pdp2/arbitrary_data/zip_files.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
sandbox/pdp2/arbitrary_data/zip_files.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
import zipfile
import random
# Inclusive range for the random integers written to the sample files.
RAND_INT_RANGE = (1, 100)


def wrf(fname, count=100):
    """Write `count` random integers from RAND_INT_RANGE to `fname`.

    The integers are concatenated with no separator (the original behavior).
    `count` defaults to 100 for backward compatibility; it is parameterized
    so callers can generate files of other sizes.
    """
    with open(fname, 'w') as f:
        for _ in range(count):
            f.write(str(random.randint(*RAND_INT_RANGE)))
# Generate ten sample files of random digits, then archive each one under a
# unique random directory inside myzip.zip.
fnames = ['file%d.txt' % i for i in range(10)]
for fname in fnames:
    wrf(fname)

dirpaths = set()
with zipfile.ZipFile('myzip.zip', 'w', compression=zipfile.ZIP_DEFLATED) as zf:
    for fname in fnames:
        # Draw directory names until we get one not used yet.
        while True:
            dirpath = '/dirpath' + str(random.randint(*RAND_INT_RANGE))
            if dirpath not in dirpaths:
                break
        zf.write(fname, arcname=dirpath + '/' + fname)
        dirpaths.add(dirpath)

print('dirpaths', dirpaths)
print('fnames', fnames)
| 26.758621
| 79
| 0.636598
| 106
| 776
| 4.575472
| 0.415094
| 0.057732
| 0.098969
| 0.123711
| 0.230928
| 0.230928
| 0.173196
| 0.173196
| 0
| 0
| 0
| 0.014851
| 0.219072
| 776
| 28
| 80
| 27.714286
| 0.785479
| 0.043814
| 0
| 0.090909
| 0
| 0
| 0.067568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.136364
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911750f22693957597b2ca1cf0ab39d191230dfc
| 1,497
|
py
|
Python
|
tests/testproject/testproject/tests/test_middleware.py
|
mwesterhof/wagtail_managed404
|
a961271c7fc70accb43ec329da9defe36e3dab3c
|
[
"MIT"
] | 1
|
2021-03-11T10:06:04.000Z
|
2021-03-11T10:06:04.000Z
|
tests/testproject/testproject/tests/test_middleware.py
|
mwesterhof/wagtail_managed404
|
a961271c7fc70accb43ec329da9defe36e3dab3c
|
[
"MIT"
] | null | null | null |
tests/testproject/testproject/tests/test_middleware.py
|
mwesterhof/wagtail_managed404
|
a961271c7fc70accb43ec329da9defe36e3dab3c
|
[
"MIT"
] | null | null | null |
import unittest
from django.test import Client
from wagtail.core.models import Page
from wagtail_managed404.models import PageNotFoundEntry
class TestMiddleware(unittest.TestCase):
    """Tests for `wagtail_app_pages` package."""

    def setUp(self):
        # Fresh client per test plus the URLs/page used across scenarios.
        self.client = Client()
        self.invalid_url = '/definitely_not_an_actual_url/'
        self.redirect_to_url = '/much_better_url/'
        self.redirect_to_page = Page.objects.get(depth=2)

    def test_redirect_to_url(self):
        """A 404 entry with redirect_to_url set should 302 to that URL."""
        PageNotFoundEntry.objects.all().delete()
        entry = self._trigger_404()
        entry.redirect_to_url = self.redirect_to_url
        entry.save()
        self._validate_redirect(self.invalid_url, self.redirect_to_url)

    def test_redirect_to_page(self):
        """A 404 entry with redirect_to_page set should 302 to the page's URL."""
        PageNotFoundEntry.objects.all().delete()
        entry = self._trigger_404()
        entry.redirect_to_page = self.redirect_to_page
        entry.save()
        self._validate_redirect(self.invalid_url, self.redirect_to_page.url)

    def _trigger_404(self):
        """Request an unknown URL and return the PageNotFoundEntry it creates."""
        response = self.client.get(self.invalid_url)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(response.status_code, 404)
        entries = PageNotFoundEntry.objects.filter(url=self.invalid_url)
        self.assertEqual(entries.count(), 1)
        return entries.first()

    def _validate_redirect(self, source_url, target_url):
        """Assert that GET source_url responds with a 302 to target_url."""
        response = self.client.get(source_url)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, target_url)
| 34.022727
| 76
| 0.706079
| 187
| 1,497
| 5.363636
| 0.304813
| 0.069791
| 0.083749
| 0.084746
| 0.437687
| 0.329013
| 0.255234
| 0.255234
| 0.255234
| 0.255234
| 0
| 0.016611
| 0.195725
| 1,497
| 43
| 77
| 34.813953
| 0.816445
| 0.025384
| 0
| 0.1875
| 0
| 0
| 0.032347
| 0.020647
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.15625
| false
| 0
| 0.125
| 0
| 0.34375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911a60720a34ab009d3e5702a34a60c445eb65cc
| 5,827
|
py
|
Python
|
kronos/kronos.py
|
jinified/kronos
|
1f110372a025d28ccc407372320491ee818c893d
|
[
"MIT"
] | null | null | null |
kronos/kronos.py
|
jinified/kronos
|
1f110372a025d28ccc407372320491ee818c893d
|
[
"MIT"
] | null | null | null |
kronos/kronos.py
|
jinified/kronos
|
1f110372a025d28ccc407372320491ee818c893d
|
[
"MIT"
] | null | null | null |
"""
Kronos: A simple scheduler for graduate training programme
Entities: User, Schedule, Rotation
"""
from operator import itemgetter
from datetime import datetime, timedelta
def getRotationCapacity(rotationId, startDate, endDate, assignments):
    """Calculate number of user-weeks assigned to a rotation over a period.

    Parameters:
        rotationId (str): key of the rotation in `assignments`
        startDate (str): period start, "%d%m%Y" format
        endDate (str): period end, "%d%m%Y" format
        assignments (dict): rotationId -> [[week_counts, ...], ...] where
            week_counts maps "%W%Y" week keys to assigned-user counts

    Returns:
        int: sum of per-week assignment counts over the period
    """
    start = datetime.strptime(startDate, "%d%m%Y")
    end = datetime.strptime(endDate, "%d%m%Y")
    duration = int((end - start).days / 7.0)
    # Week keys ("%W%Y") covered by the rotation period.
    weeks = [(start + timedelta(weeks=x)).strftime("%W%Y") for x in range(duration)]
    week_counts = assignments[rotationId][0][0]
    # BUG FIX: the original used itemgetter(*weeks), which returns a bare
    # value (not a tuple) when len(weeks) == 1, making sum() raise
    # TypeError for one-week rotations. Summing explicitly is safe.
    return sum(week_counts[week] for week in weeks)
def score_assignment(
    assignments,
    solution,
    earliestAvailableDate,
    core_rotations=["PMO", "PE", "SE", "PM"],
    rotation_duration={
        "PMO": 12,
        "PE": 12,
        "SE": 12,
        "PM": 12,
        "SYS": 12,
        "ARC": 12,
        "ANA": 12,
    },
):
    """Calculate loss function for suggested solution (negative = better).

    Parameters:
        assignments (dict): global assignment object by rotation
        solution (list): (rotationId, position) pairs for a user
        earliestAvailableDate (date): earliest date a user can start a rotation
        core_rotations (list): rotations that should be completed first
        rotation_duration (dict): duration of each rotation in weeks

    Returns:
        int: total loss (soft-constraint penalties plus capacity spread)

    Note: the mutable default arguments are never mutated here, so they
    are safe; they are kept for interface compatibility.
    """
    # SOFT CONSTRAINT 1 - core rotations placed within the first
    # len(core_rotations) positions each earn a -3 bonus.
    core_first_loss = sum(
        -3 if item[0] in core_rotations else 0
        for item in solution
        if int(item[1]) <= len(core_rotations)
    )
    # SOFT CONSTRAINT 2 - an external assignment ("EXT") must come last.
    rotation_ids = [item[0] for item in solution]
    external_assignment_loss = (
        99 if "EXT" in rotation_ids and solution[-1][0] != "EXT" else 0
    )
    # Cumulative end offset (in weeks) of each rotation, in order. This
    # replaces the original nested comprehension, which re-bound `x`
    # inside the inner sum and obscured that it was a running total.
    cumulative_weeks = 0
    timings = []
    for item in solution:
        cumulative_weeks += rotation_duration[item[0]]
        timings.append((item[0], cumulative_weeks))
    # NOTE(review): startDate advances by the *cumulative* offset each
    # iteration (original behavior, preserved) — confirm this is intended
    # rather than advancing by the single rotation's duration.
    startDate = earliestAvailableDate
    schedule = []
    for rotation_id, end_offset in timings:
        endDate = startDate + timedelta(weeks=end_offset) - timedelta(days=1)
        # Pull a weekend end date back to the preceding Friday.
        if endDate.weekday() >= 5:
            endDate -= timedelta(endDate.weekday() - 4)
        schedule.append(
            (rotation_id, startDate.strftime("%d%m%Y"), endDate.strftime("%d%m%Y"))
        )
        startDate += timedelta(weeks=end_offset)
    # Capacity pressure of the proposed schedule (debug print removed).
    spread_first_loss = sum(
        getRotationCapacity(entry[0], entry[1], entry[2], assignments)
        for entry in schedule
    )
    return core_first_loss + external_assignment_loss + spread_first_loss
def schedule2assignments(schedule):
    """Convert a per-user schedule object to a per-rotation assignment object.

    Parameters:
        schedule (dict): userId -> list of {"rotationId", "startDate",
            "endDate"} dicts (dates in "%d%m%Y" format)

    Returns:
        dict: rotationId -> [[week_counts, min_week, max_week],
            [(userId, startDate, endDate), ...]] where week_counts maps
            "%W%Y" keys to assigned-user counts
    """
    rotations = {}
    for userId, userSchedule in schedule.items():
        for rotation in userSchedule:
            # `rotation_id` avoids shadowing the builtin `id`.
            rotation_id = rotation["rotationId"]
            if rotation_id not in rotations:
                rotations[rotation_id] = [[{}], []]
            startDate, endDate = itemgetter("startDate", "endDate")(rotation)
            start = datetime.strptime(startDate, "%d%m%Y")
            end = datetime.strptime(endDate, "%d%m%Y")
            duration = int((end - start).days / 7.0)
            week_counts = rotations[rotation_id][0][0]
            for i in range(duration):
                week = (start + timedelta(weeks=i)).strftime("%W%Y")
                week_counts[week] = week_counts.get(week, 0) + 1
            rotations[rotation_id][1].append((userId, startDate, endDate))
            sortedDate = sorted(week_counts.keys())
            summary = rotations[rotation_id][0]
            if len(summary) < 2:
                summary.append(sortedDate[0])
                summary.append(sortedDate[-1])
            else:
                # BUG FIX: the original elif chain never checked the max
                # bound when the min bound was updated; the bounds must
                # be updated independently. (Debug prints also removed.)
                if sortedDate[0] < summary[1]:
                    summary[1] = sortedDate[0]
                if sortedDate[-1] > summary[2]:
                    summary[2] = sortedDate[-1]
    return rotations
def assignments2schedule(assignments):
    """Convert a per-rotation assignment object to a per-user schedule.

    Parameters:
        assignments (dict): rotationId -> [_, {userId: (startDate, endDate)}]

    Returns:
        dict: userId -> list of {"rotationId", "startDate", "endDate"} dicts
    """
    users = {}
    for rotationId, rotationInfo in assignments.items():
        for userId, userAssignment in rotationInfo[1].items():
            # setdefault replaces the manual "if not in" initialization;
            # the debug print of the result has been removed.
            users.setdefault(userId, []).append(
                {
                    "rotationId": rotationId,
                    "startDate": userAssignment[0],
                    "endDate": userAssignment[1],
                }
            )
    return users
def generateUserSchedule(user, assignments, scoring_function):
    """Generate the most optimal schedule for a single user.

    Parameters:
        user (object): User
        assignments (dict): time-bounded assignments
        scoring_function (function): ranks candidate assignments

    Returns:
        list: list of rotation dicts

    NOTE: stub implementation — returns a fixed placeholder schedule.
    """
    placeholder = {"rotationId": "PMO", "startDate": "012018"}
    return [placeholder]
def getOverallSchedule(users):
    """Generate the overall schedule from individual users' schedules.

    Parameters:
        users (list): list of Users

    Returns:
        dict: overall assignments

    NOTE: stub implementation — always returns an empty mapping.
    """
    overall = {}
    return overall
def getConflictingAssignments(schedule):
    """Get assignments which exceed rotation capacity.

    Parameters:
        schedule (dict): overall assignments

    Returns:
        dict: conflicting assignments grouped by rotation

    NOTE: stub implementation — always returns an empty mapping.
    """
    conflicts = {}
    return conflicts
if __name__ == "__main__":
    # No CLI entry point yet; this module is used as a library.
    pass
| 33.107955
| 97
| 0.60151
| 654
| 5,827
| 5.311927
| 0.238532
| 0.047496
| 0.044905
| 0.01871
| 0.127807
| 0.084053
| 0.064479
| 0.050086
| 0.050086
| 0.050086
| 0
| 0.021947
| 0.28059
| 5,827
| 175
| 98
| 33.297143
| 0.806775
| 0.27098
| 0
| 0.073395
| 0
| 0
| 0.040303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06422
| false
| 0.009174
| 0.018349
| 0
| 0.146789
| 0.036697
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911aa9326eb51bb9ac375b836bec89f414a26904
| 2,384
|
py
|
Python
|
personal_env/lib/python3.8/site-packages/pylint/lint/utils.py
|
jestinmwilson/personal-website
|
6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74
|
[
"MIT"
] | null | null | null |
personal_env/lib/python3.8/site-packages/pylint/lint/utils.py
|
jestinmwilson/personal-website
|
6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74
|
[
"MIT"
] | null | null | null |
personal_env/lib/python3.8/site-packages/pylint/lint/utils.py
|
jestinmwilson/personal-website
|
6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74
|
[
"MIT"
] | null | null | null |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import contextlib
import sys
from pylint.utils import utils
class ArgumentPreprocessingError(Exception):
    """Raised if an error occurs during command-line argument preprocessing."""
def preprocess_options(args, search_for):
    """Process a subset of options before full argument parsing.

    Scans *args* (modified in place) for ``--option[=value]`` entries whose
    option name is a key of *search_for*, removes them from the list, and
    invokes the associated callback.

    Parameters:
        args (list): command-line arguments; matched options (and their
            consumed values) are deleted in place.
        search_for (dict): option name -> (callback, takes_argument).
            The callback is invoked as callback(option, value).

    Raises:
        ArgumentPreprocessingError: if an option that requires a value has
            none, or an option that takes no value was given one.
    """
    i = 0
    while i < len(args):
        arg = args[i]
        # Only long options are preprocessed; everything else is skipped.
        if not arg.startswith("--"):
            i += 1
            continue
        option, sep, val = arg[2:].partition("=")
        if not sep:
            val = None
        try:
            cb, takearg = search_for[option]
        except KeyError:
            i += 1
            continue
        del args[i]
        if takearg and val is None:
            # Value must come from the next argument, which must exist
            # and not itself be an option.
            if i >= len(args) or args[i].startswith("-"):
                msg = "Option %s expects a value" % option
                raise ArgumentPreprocessingError(msg)
            val = args[i]
            del args[i]
        elif not takearg and val is not None:
            # BUG FIX: corrected the grammar of the original message
            # ("doesn't expects a value").
            msg = "Option %s doesn't expect a value" % option
            raise ArgumentPreprocessingError(msg)
        cb(option, val)
def _patch_sys_path(args):
    """Prepend the Python path of each argument to sys.path, deduplicated.

    Returns the original sys.path contents so callers can restore them.
    """
    original = list(sys.path)
    additions = []
    seen = set()
    for arg in args:
        python_path = utils.get_python_path(arg)
        if python_path not in seen:
            seen.add(python_path)
            additions.append(python_path)
    # In-place slice assignment keeps the same list object alive.
    sys.path[:] = additions + sys.path
    return original
@contextlib.contextmanager
def fix_import_path(args):
    """Prepare sys.path for running the linter checks.

    Within this context, each of the given arguments is importable.
    Paths are added to sys.path in the order of the arguments, without
    duplicates; sys.path is restored to its original value on exit.
    """
    saved_path = _patch_sys_path(args)
    try:
        yield
    finally:
        sys.path[:] = saved_path
| 30.961039
| 81
| 0.557047
| 284
| 2,384
| 4.623239
| 0.46831
| 0.053313
| 0.016756
| 0.019802
| 0.080731
| 0.080731
| 0.080731
| 0
| 0
| 0
| 0
| 0.005219
| 0.356963
| 2,384
| 76
| 82
| 31.368421
| 0.851272
| 0.280621
| 0
| 0.22449
| 0
| 0
| 0.039141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.081633
| 0
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911ae3a32af48a82692eb10be784caaac6d3d48a
| 4,847
|
py
|
Python
|
mol_dqn/experimental/multi_obj.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
mol_dqn/experimental/multi_obj.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
mol_dqn/experimental/multi_obj.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Generates molecules that satisfy two targets.
Target1: SAS
Target2: QED
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Contrib import SA_Score
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
# Command-line flags: optimization targets for SAS and QED, and the
# per-remaining-step discount used by the reward.
flags.DEFINE_float('target_sas', 1, 'The target SAS of the molecule.')
flags.DEFINE_float('target_qed', 0.5, 'The target QED of the molecule.')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
  """Defines the subclass of generating a molecule with a specific reward.

  The reward is a 1-D vector with 2 entries. Despite the original note
  about similarity, the code below returns negated distances to the
  flag-defined targets: reward = (-|SAS - target_sas|, -|QED - target_qed|).
  """

  def _reward(self):
    """Calculates the reward of the current state.

    Returns:
      A tuple of negated absolute differences from the SAS and QED
      targets, or (0.0, 0.0) when the state is missing or cannot be
      parsed into an RDKit molecule.
    """
    # An empty or unparseable SMILES state earns zero reward.
    if self._state is None:
      return 0.0, 0.0
    mol = Chem.MolFromSmiles(self._state)
    if mol is None:
      return 0.0, 0.0
    qed_value = QED.qed(mol)
    sas = SA_Score.sascorer.calculateScore(mol)
    return -abs(sas - FLAGS.target_sas), -abs(qed_value - FLAGS.target_qed)
def soft_cst(v, l, r):
  """Soft constraint: 1 inside [l, r], else negated distance to the nearer bound."""
  inside = l <= v <= r
  return 1 if inside else -min(abs(l - v), abs(r - v))
class Molecule(molecules_mdp.Molecule):
  """SAS and QED reward molecule.

  The reward is the sum of two soft-constraint scores (SAS and QED within
  a band around their flag-defined targets), discounted by gamma for each
  remaining step in the episode.
  """

  def _reward(self):
    """Calculates the reward of the current state.

    Returns:
      Discounted sum of the SAS and QED soft-constraint scores, or
      (0.0, 0.0) when the state is missing or cannot be parsed into an
      RDKit molecule.
    """
    # An empty or unparseable SMILES state earns zero reward.
    if self._state is None:
      return 0.0, 0.0
    mol = Chem.MolFromSmiles(self._state)
    if mol is None:
      return 0.0, 0.0
    qed_value = QED.qed(mol)
    sas = SA_Score.sascorer.calculateScore(mol)
    # (Commented-out alternative reward formulations removed.)
    # Discount by how many steps remain in the episode.
    discount = FLAGS.gamma**(self.max_steps - self._counter)
    return (soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2) +
            soft_cst(qed_value, FLAGS.target_qed - 0.1,
                     FLAGS.target_qed + 0.1)) * discount
def main(argv):
  """Builds the molecule environment and DQN from flags, then trains."""
  del argv  # unused, required by the absl.app entry-point signature
  # NOTE(review): FLAGS.hparams and FLAGS.model_dir are presumably defined
  # by the imported deep_q_networks / run_dqn modules — confirm.
  if FLAGS.hparams is not None:
    with gfile.Open(FLAGS.hparams, 'r') as f:
      hparams = deep_q_networks.get_hparams(**json.load(f))
  else:
    hparams = deep_q_networks.get_hparams()
  hparams.add_hparam('target_qed', FLAGS.target_qed)
  hparams.add_hparam('target_sas', FLAGS.target_sas)
  # Molecule MDP environment seeded with a fixed starting molecule.
  environment = Molecule(
      atom_types=set(hparams.atom_types),
      init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12',
      allow_removal=hparams.allow_removal,
      allow_no_modification=hparams.allow_no_modification,
      allow_bonds_between_rings=False,
      allowed_ring_sizes={3, 4, 5, 6},
      max_steps=hparams.max_steps_per_episode)
  dqn = deep_q_networks.DeepQNetwork(
      input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
      q_fn=functools.partial(
          deep_q_networks.multi_layer_model, hparams=hparams),
      optimizer=hparams.optimizer,
      grad_clipping=hparams.grad_clipping,
      num_bootstrap_heads=hparams.num_bootstrap_heads,
      gamma=hparams.gamma,
      epsilon=1.0)
  run_dqn.run_training(
      hparams=hparams,
      environment=environment,
      dqn=dqn,
  )
  # Persist the resolved hyperparameters next to the model output.
  core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
  # absl entry point: parses command-line flags, then calls main(argv).
  app.run(main)
| 30.484277
| 78
| 0.704766
| 725
| 4,847
| 4.546207
| 0.314483
| 0.007282
| 0.007282
| 0.023058
| 0.349211
| 0.342536
| 0.272451
| 0.272451
| 0.272451
| 0.272451
| 0
| 0.020419
| 0.201774
| 4,847
| 158
| 79
| 30.677215
| 0.831481
| 0.340004
| 0
| 0.197531
| 0
| 0
| 0.055144
| 0.011609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.197531
| 0
| 0.37037
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911c431b68da1378ffaf6b7b804e393825322dec
| 1,770
|
py
|
Python
|
examples/cli-solver/cli_solver.py
|
danagle/boggled
|
13fea4c31b5dff72093c38d1ad368dec9d44f4d0
|
[
"MIT"
] | null | null | null |
examples/cli-solver/cli_solver.py
|
danagle/boggled
|
13fea4c31b5dff72093c38d1ad368dec9d44f4d0
|
[
"MIT"
] | null | null | null |
examples/cli-solver/cli_solver.py
|
danagle/boggled
|
13fea4c31b5dff72093c38d1ad368dec9d44f4d0
|
[
"MIT"
] | null | null | null |
# cli_solver.py
import argparse
import os
from boggled import BoggleBoard, BoggleSolver, BoggleWords
def solve_board(board, words):
    """Run the Boggle solver over *board* with *words* and return the solver."""
    boggle_solver = BoggleSolver(board, words)
    boggle_solver.solve()
    return boggle_solver
def display_board_details(board):
    """Print the board's dimensions and a grid rendering of its tiles."""
    print("Board details:")
    print("Columns: ", board.columns)
    print("Rows: ", board.rows)
    s = '\n'
    for pos in board.tiles:
        # NOTE(review): both branches appear to append the same single
        # space — presumably the original padded two-character tiles with
        # different widths for alignment; confirm the intended literals.
        s += ' ' if len(board.tiles[pos]) == 2 else ' '
        s += board.tiles[pos]
        # Emits a newline after each full row; positions are presumably
        # 1-based so pos % columns == 0 marks a row end — confirm.
        if (pos % board.columns) == 0:
            s += '\n'
    print(s)
if __name__ == '__main__':
    # CLI: board letters + dictionary file, with an optional minimum word
    # length and optional per-word path reporting.
    parser = argparse.ArgumentParser()
    parser.add_argument("letters", type=str,
                        help="Board letters")
    parser.add_argument("dictionary", type=str,
                        help="The text file containing the dictionary word list.")
    parser.add_argument("-m", "--min", type=int,
                        help="The minimum word size.")
    parser.add_argument("-p", "--paths", action="store_true",
                        help="Include the path followed for each word found.")
    args = parser.parse_args()
    if os.path.isfile(args.dictionary):
        # Apply the minimum word length only when one was provided.
        if isinstance(args.min, int):
            words = BoggleWords(args.min)
        else:
            words = BoggleWords()
        words.loadFromFile(args.dictionary)
        board = BoggleBoard(args.letters)
        display_board_details(board)
        solved_board = solve_board(board, words)
        print('Found:', len(solved_board.found))
        if args.paths:
            # Show each found word with the tile path that produced it.
            for word in solved_board.found:
                print('{} : {}'.format(word, solved_board.found[word]))
        else:
            print(solved_board.foundWords)
    else:
        print("Error: Unable to find the dictionary.")
| 30
| 82
| 0.589266
| 204
| 1,770
| 4.985294
| 0.372549
| 0.054081
| 0.066863
| 0.039331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001581
| 0.285311
| 1,770
| 58
| 83
| 30.517241
| 0.802372
| 0.007345
| 0
| 0.065217
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.065217
| 0
| 0.130435
| 0.173913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911cc6fdfec9f96a292bbbfc6b3b0ac51752840f
| 45,086
|
py
|
Python
|
src/wepy/orchestration/orchestrator.py
|
gitter-badger/wepy-1
|
9bc619aeae178ad5d10f658fae2abfd2c7aeb18a
|
[
"MIT"
] | 35
|
2017-08-22T15:39:06.000Z
|
2022-03-20T15:17:52.000Z
|
src/wepy/orchestration/orchestrator.py
|
gitter-badger/wepy-1
|
9bc619aeae178ad5d10f658fae2abfd2c7aeb18a
|
[
"MIT"
] | 33
|
2017-10-02T22:04:45.000Z
|
2022-03-02T22:19:08.000Z
|
src/wepy/orchestration/orchestrator.py
|
stxinsite/wepy
|
352d4c1316b20e839aae8824eedd66f0f2d0b456
|
[
"MIT"
] | 17
|
2018-07-14T15:33:30.000Z
|
2022-01-18T16:30:55.000Z
|
from copy import copy, deepcopy
import sqlite3
from hashlib import md5
import time
import os
import os.path as osp
from base64 import b64encode, b64decode
from zlib import compress, decompress
import itertools as it
import logging
# instead of pickle we use dill, so we can save dynamically defined
# classes
import dill
from wepy.sim_manager import Manager
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.snapshot import SimApparatus, SimSnapshot
from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri
class OrchestratorError(Exception):
    """Error raised by the Orchestrator (e.g. unregistered hashes)."""
    pass
class Orchestrator():
""" """
# we freeze the pickle protocol for making hashes, because we care
# more about stability than efficiency of newer versions
HASH_PICKLE_PROTOCOL = 3
DEFAULT_WORKDIR = Configuration.DEFAULT_WORKDIR
DEFAULT_CONFIG_NAME = Configuration.DEFAULT_CONFIG_NAME
DEFAULT_NARRATION = Configuration.DEFAULT_NARRATION
DEFAULT_MODE = Configuration.DEFAULT_MODE
DEFAULT_CHECKPOINT_FILENAME = "checkpoint.orch.sqlite"
ORCH_FILENAME_TEMPLATE = "{config}{narration}.orch.sqlite"
# the default way to oepn up the whole parent database
DEFAULT_ORCHESTRATION_MODE = 'x'
# mode to open the individual kv stores on the parent database
KV_MODE = 'r+'
# default timeout for connecting to a database
SQLITE3_DEFAULT_TIMEOUT = 5
# the fields to return (and their order) as a record for a run
# query
RUN_SELECT_FIELDS = ('last_cycle_idx', 'config_hash')
    def __init__(self, orch_path=None,
                 mode='x',
                 append_only=False,
                 ):
        """Open (or create) an orchestration database.

        Parameters
        ----------
        orch_path : str or None
            Path of the SQLite database file; None yields an in-memory DB.
        mode : str
            File-style open mode used to build the database URI.
        append_only : bool
            When True, enables dirty reads since no updates can occur.
        """
        self._mode = mode
        self._append_only = append_only
        # handle the path and convert to a proper URI for the database
        # given the path and the mode
        self._db_uri = gen_uri(orch_path, mode)
        # run table: start_hash, end_hash, num_cycles, configuration_id
        # get a raw connection to the database
        self._db = sqlite3.connect(self.db_uri, uri=True,
                                   timeout=self.SQLITE3_DEFAULT_TIMEOUT)
        self._closed = False
        # set isolation level to autocommit
        self._db.isolation_level = None
        # we can use read_uncommited only in append_only mode (no
        # updates) because you never have to worry about dirty reads
        # since you can't update
        if self.append_only:
            self._db.execute("PRAGMA read_uncommited=1")
        # we make a table for the run data, if it doesn't already
        # exist
        c = self._db.cursor().execute(self.create_run_table_query)
        # initialize or open each of the separate KV-stores (tables in
        # the same SQLite3 database)
        # change the mode for the KV stores since we already created the database
        # metadata: default init walkers, default apparatus, default
        # configuration
        self.metadata_kv = KV(db_url=self.db_uri,
                              table='meta',
                              mode='a',
                              value_types=None,
                              append_only=self.append_only)
        # snapshots
        self.snapshot_kv = KV(db_url=self.db_uri,
                              table='snapshots',
                              primary_key='snaphash',
                              value_name='snapshot',
                              mode='a',
                              append_only=self.append_only)
        # configurations
        self.configuration_kv = KV(db_url=self.db_uri,
                                   table='configurations',
                                   primary_key='config_hash',
                                   value_name='config',
                                   mode='a',
                                   append_only=self.append_only)
    @property
    def mode(self):
        """Mode string the orchestrator database was opened with ('x', 'r+', ...)."""
        return self._mode
    @property
    def append_only(self):
        """Whether the database was opened in append-only (no-update) mode."""
        return self._append_only
def close(self):
if self._closed == True:
raise IOError("The database connection is already closed")
else:
# close all the connections
self.metadata_kv.close()
self.configuration_kv.close()
self.snapshot_kv.close()
self._db.close()
self._closed = True
    @property
    def db_uri(self):
        """SQLite URI the database connection was opened with."""
        return self._db_uri
@property
def orch_path(self):
# if it is not an in-memory database we parse off the path and
# return that
if self.db_uri == SQLITE3_INMEMORY_URI:
return None
else:
# URIs have the following form: protocol:url?query
# destructure the URI
_, tail = self.db_uri.split(':')
if len(tail.split('?')) > 1:
url, _ = tail.split('?')
else:
url = tail
return url
@classmethod
def serialize(cls, snapshot):
"""Serialize a snapshot to a compressed, encoded, pickle string
representation.
Currently uses the dill module for pickling because the base
pickle module is inadequate. However, it is mostly compatible
and can be read natively with pickle but this usage is
officially not supported. Instead use the deserialize_snapshot.
Also compresses with default zlib compression and is encoded
in base64.
The object will always have a deepcopy performed on it so that
all of the extraneous references to it are avoided since there
is no (AFAIK) way to make sure all references to an object are
deleted.
NOTE: Perhaps there is a way and that should be done (and
tested) to see if it provides stable pickles (i.e. pickles
that always hash to the same value). To avoid the overhead of
copying large objects.
Parameters
----------
snapshot : SimSnapshot object
The snapshot of the simulation you want to serialize.
Returns
-------
serial_str : str
Serialized string of the snapshot object
"""
serial_str = b64encode(
compress(
dill.dumps(
deepcopy(snapshot),
protocol=cls.HASH_PICKLE_PROTOCOL,
recurse=True)
)
)
return serial_str
# core methods for serializing python objects, used for snapshots,
# apparatuses, configurations, and the initial walker list
@classmethod
def deserialize(cls, serial_str):
"""Deserialize an unencoded string snapshot to an object.
Parameters
----------
serial_str : str
Serialized string of the snapshot object
Returns
-------
snapshot : SimSnapshot object
Simulation snapshot object
"""
return dill.loads(decompress(b64decode(serial_str)))
# defaults getters and setters
def set_default_sim_apparatus(self, sim_apparatus):
# serialize the apparatus and then set it
serial_app = self.serialize(sim_apparatus)
self.metadata_kv['default_sim_apparatus'] = serial_app
def set_default_init_walkers(self, init_walkers):
# serialize the apparatus and then set it
serial_walkers = self.serialize(init_walkers)
self.metadata_kv['default_init_walkers'] = serial_walkers
def set_default_configuration(self, configuration):
# serialize the apparatus and then set it
serial_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serial_config)
self.metadata_kv['default_configuration_hash'] = config_hash
self.configuration_kv[config_hash] = serial_config
def set_default_snapshot(self, snapshot):
snaphash = self.add_snapshot(snapshot)
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = snaphash
return snaphash
def gen_default_snapshot(self):
# generate the snapshot
sim_start_hash = self.gen_start_snapshot(self.get_default_init_walkers())
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = sim_start_hash
return sim_start_hash
    def get_default_sim_apparatus(self):
        """Return the deserialized default simulation apparatus."""
        return self.deserialize(self.metadata_kv['default_sim_apparatus'])
    def get_default_init_walkers(self):
        """Return the deserialized default initial walkers."""
        return self.deserialize(self.metadata_kv['default_init_walkers'])
def get_default_configuration(self):
config_hash = self.metadata_kv['default_configuration_hash']
return self.get_configuration(config_hash)
    def get_default_configuration_hash(self):
        """Return the hash key of the default configuration."""
        return self.metadata_kv['default_configuration_hash']
def get_default_snapshot(self):
start_hash = self.metadata_kv['default_snapshot_hash']
return self.get_snapshot(start_hash)
    def get_default_snapshot_hash(self):
        """Return the hash key of the default snapshot."""
        return self.metadata_kv['default_snapshot_hash']
    @classmethod
    def hash_snapshot(cls, serial_str):
        """Return the hex MD5 digest of a serialized snapshot string.

        Parameters
        ----------
        serial_str : bytes
            Serialized snapshot (as produced by `serialize`).

        Returns
        -------
        str
            Hex digest used as the KV-store key.
        """
        return md5(serial_str).hexdigest()
def get_snapshot(self, snapshot_hash):
"""Returns a copy of a snapshot.
Parameters
----------
snapshot_hash :
Returns
-------
"""
return self.deserialize(self.snapshot_kv[snapshot_hash])
def get_configuration(self, config_hash):
"""Returns a copy of a snapshot.
Parameters
----------
config_hash :
Returns
-------
"""
return self.deserialize(self.configuration_kv[config_hash])
@property
def snapshot_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.snapshot_kv.keys())
@property
def configuration_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.configuration_kv.keys())
def add_snapshot(self, snapshot):
"""
Parameters
----------
snapshot :
Returns
-------
"""
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in self.snapshot_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serialized_snapshot
return snaphash
def add_serial_snapshot(self, serial_snapshot):
# get the hash of the snapshot
snaphash = self.hash_snapshot(serial_snapshot)
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in self.snapshot_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serial_snapshot
return snaphash
def gen_start_snapshot(self, init_walkers):
"""
Parameters
----------
init_walkers :
Returns
-------
"""
# make a SimSnapshot object using the initial walkers and
start_snapshot = SimSnapshot(init_walkers, self.get_default_sim_apparatus())
# save the snapshot, and generate its hash
sim_start_md5 = self.add_snapshot(start_snapshot)
return sim_start_md5
    @property
    def default_snapshot_hash(self):
        """Hash key of the default snapshot recorded in metadata."""
        return self.metadata_kv['default_snapshot_hash']
    @property
    def default_snapshot(self):
        """Deserialized copy of the default snapshot."""
        return self.get_snapshot(self.default_snapshot_hash)
def snapshot_registered(self, snapshot):
"""Check whether a snapshot is already in the database, based on the
hash of it.
This serializes the snapshot so may be slow.
Parameters
----------
snapshot : SimSnapshot object
The snapshot object you want to query for.
Returns
-------
"""
# serialize and hash the snapshot
snaphash = self.hash_snapshot(self.serialize(snapshot))
# then check it
return self.snapshot_hash_registered(snaphash)
def snapshot_hash_registered(self, snapshot_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if snapshot_hash == h else False for h in self.snapshot_hashes]):
return True
else:
return False
def configuration_hash_registered(self, config_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if config_hash == h else False for h in self.configuration_hashes]):
return True
else:
return False
### run methods
def add_configuration(self, configuration):
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# check that the hash is not already in the snapshots
if any([True if config_hash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return config_hash
# save the snapshot in the KV store
self.configuration_kv[config_hash] = serialized_config
return config_hash
def add_serial_configuration(self, serial_configuration):
# get the hash of the configuration
snaphash = self.hash_snapshot(serial_configuration)
# check that the hash is not already in the configurations
if any([True if snaphash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the configuration in the KV store
self.configuration_kv[snaphash] = serial_configuration
return snaphash
    @property
    def create_run_table_query(self):
        """SQL DDL for the runs table (idempotent CREATE TABLE IF NOT EXISTS)."""
        # NOTE(review): config_hash is declared without a type affinity —
        # presumably intended as TEXT; confirm before changing the schema.
        create_run_table_query = """
        CREATE TABLE IF NOT EXISTS runs
        (start_hash TEXT NOT NULL,
        end_hash TEXT NOT NULL,
        config_hash NOT NULL,
        last_cycle_idx INTEGER NOT NULL,
        PRIMARY KEY (start_hash, end_hash))
        """
        return create_run_table_query
    @property
    def add_run_record_query(self):
        """Parameterized INSERT for one row of the runs table."""
        add_run_row_query = """
        INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
        VALUES (?, ?, ?, ?)
        """
        return add_run_row_query
    @property
    def update_run_record_query(self):
        """Parameterized UPDATE of config_hash/last_cycle_idx for one run."""
        q = """
        UPDATE runs
        SET config_hash = ?,
        last_cycle_idx = ?
        WHERE start_hash=? AND end_hash=?
        """
        return q
    @property
    def delete_run_record_query(self):
        """Parameterized DELETE of one run row keyed by (start_hash, end_hash)."""
        q = """
        DELETE FROM runs
        WHERE start_hash=? AND end_hash=?
        """
        return q
def _add_run_record(self, start_hash, end_hash, configuration_hash, cycle_idx):
params = (start_hash, end_hash, configuration_hash, cycle_idx)
# do it as a transaction
c = self._db.cursor()
# run the insert
c.execute(self.add_run_record_query, params)
def _delete_run_record(self, start_hash, end_hash):
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(self.delete_run_record_query, params)
def _update_run_record(self, start_hash, end_hash, new_config_hash, new_last_cycle_idx):
params = (new_config_hash, new_last_cycle_idx, start_hash, end_hash)
# do it as a transaction
c = self._db.cursor()
# run the update
c.execute(self.update_run_record_query, params)
    def register_run(self, start_hash, end_hash, config_hash, cycle_idx):
        """Record a completed run after validating all three hashes.

        Parameters
        ----------
        start_hash : str
            Hash of the registered starting snapshot.
        end_hash : str
            Hash of the registered ending snapshot.
        config_hash : str
            Hash of the registered configuration used by the run.
        cycle_idx : int
            The cycle of the simulation run the checkpoint was generated for.

        Raises
        ------
        OrchestratorError
            If any of the three hashes is not registered.
        """
        # check that the hashes are for snapshots in the orchestrator
        # if one is not registered raise an error
        if not self.snapshot_hash_registered(start_hash):
            raise OrchestratorError(
                "snapshot start_hash {} is not registered with the orchestrator".format(
                    start_hash))
        if not self.snapshot_hash_registered(end_hash):
            raise OrchestratorError(
                "snapshot end_hash {} is not registered with the orchestrator".format(
                    end_hash))
        if not self.configuration_hash_registered(config_hash):
            raise OrchestratorError(
                "config hash {} is not registered with the orchestrator".format(
                    config_hash))
        # save the configuration and get it's id
        self._add_run_record(start_hash, end_hash, config_hash, cycle_idx)
def get_run_records(self):
get_run_record_query = """
SELECT *
FROM runs
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
cursor = self._db.cursor()
cursor.execute(get_run_record_query)
records = cursor.fetchall()
return records
def get_run_record(self, start_hash, end_hash):
get_run_record_query = """
SELECT {fields}
FROM runs
WHERE start_hash=? AND end_hash=?
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(get_run_record_query, params)
record = cursor.fetchone()
return record
def run_last_cycle_idx(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
last_cycle_idx = record[self.RUN_SELECT_FIELDS.index('last_cycle_idx')]
return last_cycle_idx
def run_configuration(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
# get the configuration object and deserialize it
return self.deserialize(self.configuration_kv[config_hash])
def run_configuration_hash(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
return config_hash
def run_hashes(self):
return [(rec[0], rec[1]) for rec in self.get_run_records()]
def run_continues(self, start_hash, end_hash):
    """Given a start hash and end hash for a run, find the run that this
    continues.

    Parameters
    ----------
    start_hash :
        Hash of the starting snapshot of the queried run.
    end_hash :
        Hash of the ending snapshot of the queried run (not used in the
        lookup; kept for a symmetric interface).

    Returns
    -------
    run_id : tuple of (start_hash, end_hash) or None
        The run whose end snapshot is this run's start snapshot, or
        None when there is no such run.
    """

    # a run continues another when its start snapshot is the other
    # run's end snapshot; scan the run table for such a match. Unlike
    # the original while-loop, this handles an empty run table without
    # raising IndexError.
    for run_start_hash, run_end_hash in self.run_hashes():
        if start_hash == run_end_hash:
            return (run_start_hash, run_end_hash)

    # no run ends in our start snapshot
    return None
def _init_checkpoint_db(self, start_hash, configuration, checkpoint_dir, mode='x'):
    """Create a fresh checkpoint orchestrator database on disk.

    The new database is seeded with the starting snapshot (copied
    verbatim from this orchestrator's KV) and the serialized runtime
    configuration.

    Parameters
    ----------
    start_hash :
        Hash of the snapshot the run starts from; must already be in
        this orchestrator's snapshot KV.
    configuration :
        Runtime configuration object to serialize into the checkpoint.
    checkpoint_dir :
        Directory in which the checkpoint database file is created.
    mode :
        File mode used to create the database. (Default value = 'x')

    Returns
    -------
    checkpoint_path, config_hash
    """

    logging.debug("Initializing checkpoint orch database")

    # the checkpoint database lives at the default filename inside the
    # requested directory
    checkpoint_path = osp.join(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)

    logging.debug("Creating checkpoint database")
    checkpoint_orch = Orchestrator(checkpoint_path, mode=mode)

    # copy the starting snapshot byte-for-byte, bypassing any
    # deserialize/reserialize round trip
    logging.debug("Setting the starting snapshot")
    checkpoint_orch.snapshot_kv[start_hash] = self.snapshot_kv[start_hash]

    # serialize and hash the runtime configuration, then store it too
    serialized_config = self.serialize(configuration)
    config_hash = self.hash_snapshot(serialized_config)
    checkpoint_orch.configuration_kv[config_hash] = serialized_config

    checkpoint_orch.close()
    logging.debug("closing connection to checkpoint database")

    return checkpoint_path, config_hash
def _save_checkpoint(self, checkpoint_snapshot, config_hash,
                     checkpoint_db_path, cycle_idx,
                     ):
    """Save a checkpoint snapshot into the checkpoint database.

    The old run record (if any) is deleted and the new one inserted
    inside a single SQLite transaction, so the checkpoint database is
    never left half-updated. The old snapshot blob itself is removed
    outside the transaction since that is slow.

    Parameters
    ----------
    checkpoint_snapshot :
        Snapshot object to store as the new end of the run.
    config_hash :
        Hash of the configuration to reference in the run record.
    checkpoint_db_path :
        Path to the checkpoint orchestrator database.
    cycle_idx :
        Index of the last completed cycle, stored in the run record.

    Returns
    -------
    None
    """

    # orchestrator wrapper to the db
    logging.debug("Opening the checkpoint orch database")
    checkpoint_orch = Orchestrator(checkpoint_db_path, mode='r+')

    # connection to the db
    cursor = checkpoint_orch._db.cursor()

    # we replicate the code for adding the snapshot here because we
    # want the delete and add to occur transactionally

    # serialize the snapshot using the protocol for doing so
    serialized_snapshot = self.serialize(checkpoint_snapshot)

    # get the hash of the snapshot
    snaphash = self.hash_snapshot(serialized_snapshot)

    # the queries for deleting and inserting the new run record
    delete_query = """
    DELETE FROM runs
    WHERE start_hash=?
      AND end_hash=?
    """

    insert_query = """
    INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
    VALUES (?, ?, ?, ?)
    """

    # if there are any runs in the checkpoint orch we will remove the
    # old final snapshot's run record
    delete_params = None
    if len(checkpoint_orch.run_hashes()) > 0:
        start_hash, old_checkpoint_hash = checkpoint_orch.run_hashes()[0]
        delete_params = (start_hash, old_checkpoint_hash)
    else:
        # no run yet: the start is the only snapshot in the KV
        start_hash = list(checkpoint_orch.snapshot_kv.keys())[0]

    # the config should already be in the orchestrator db
    insert_params = (start_hash, snaphash, config_hash, cycle_idx)

    # start this whole process as a transaction so we don't get
    # something weird in between
    logging.debug("Starting transaction for updating run table in checkpoint")
    cursor.execute("BEGIN TRANSACTION")

    # add the new snapshot, using a special method for setting inside
    # of a transaction
    logging.debug("setting the new checkpoint snapshot into the KV")
    cursor = checkpoint_orch.snapshot_kv.set_in_tx(cursor, snaphash, serialized_snapshot)
    logging.debug("finished")

    # if we need to, delete the old end-of-run record
    if delete_params is not None:
        logging.debug("Old run record needs to be removed")
        logging.debug("Deleting the old run record")
        cursor.execute(delete_query, delete_params)
        logging.debug("finished")

    # register the new run in the run table
    logging.debug("Inserting the new run record")
    cursor.execute(insert_query, insert_params)
    logging.debug("finished")

    # end the transaction
    logging.debug("Finishing transaction")
    cursor.execute("COMMIT")
    logging.debug("Transaction committed")

    # we do the removal of the old snapshot outside of the
    # transaction since it is slow and can cause timeouts to
    # occur. Furthermore, it is okay if it is in the checkpoint as
    # the run record is what matters as long as the new checkpoint
    # is there.
    if delete_params is not None:
        # WARN: occasionally and for unknown reasons we have found
        # that the final checkpoint hash is the same as the one
        # before. (The case where the last snapshot is on the same
        # cycle as a backup is already covered). So as a last
        # resort, we check that they don't have the same hash. If
        # they do we don't delete it!
        if snaphash != old_checkpoint_hash:
            logging.debug("Deleting the old snapshot")
            del checkpoint_orch.snapshot_kv[old_checkpoint_hash]
            logging.debug("finished")
        else:
            # logging.warn is a deprecated alias; use logging.warning
            logging.warning("Final snapshot has same hash as the previous checkpoint. Not deleting the previous one.")

    checkpoint_orch.close()
    logging.debug("closed the checkpoint orch connection")
@staticmethod
def gen_sim_manager(start_snapshot, configuration):
    """Construct a wepy simulation Manager from a snapshot and a configuration.

    Parameters
    ----------
    start_snapshot :
        Snapshot providing the walkers and the apparatus filters
        (runner, boundary conditions, resampler -- in that order).
    configuration :
        Configuration providing work mapper, reporters, and monitor.

    Returns
    -------
    sim_manager
    """

    # unpack the apparatus filters by their fixed positions
    apparatus_filters = start_snapshot.apparatus.filters

    # construct the sim manager, in a wepy specific way
    sim_manager = Manager(start_snapshot.walkers,
                          runner=apparatus_filters[0],
                          boundary_conditions=apparatus_filters[1],
                          resampler=apparatus_filters[2],
                          # configuration options
                          work_mapper=configuration.work_mapper,
                          reporters=configuration.reporters,
                          sim_monitor=configuration.monitor,
                          )

    return sim_manager
def run_snapshot_by_time(self, start_hash, run_time, n_steps,
                         checkpoint_freq=None,
                         checkpoint_dir=None,
                         configuration=None,
                         configuration_hash=None,
                         checkpoint_mode='x'):
    """For a finished run continue it but resetting all the state of the
    resampler and boundary conditions.

    Parameters
    ----------
    start_hash :
        Hash of the snapshot to start running from.
    run_time :
        Wall-clock time to run for, in seconds.
    n_steps :
        Number of dynamics steps per cycle.
    checkpoint_freq :
        Save a checkpoint every this many cycles; requires
        checkpoint_dir. (Default value = None)
    checkpoint_dir :
        Directory for the checkpoint database. (Default value = None)
    configuration :
        Runtime configuration object; mutually exclusive with
        configuration_hash. (Default value = None)
    configuration_hash :
        Hash of a configuration already stored in this orchestrator.
        (Default value = None)
    checkpoint_mode :
        Mode used to create the checkpoint database.
        (Default value = 'x')

    Returns
    -------
    (end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx)
    """

    # you must have a checkpoint dir if you ask for a checkpoint
    # frequency
    if checkpoint_freq is not None and checkpoint_dir is None:
        # message fixed: previously read "...checkpoint file is a
        # frequency is specified"
        raise ValueError("Must provide a directory for the checkpoint file "
                         "if a frequency is specified")

    if configuration_hash is not None and configuration is not None:
        # a space was missing between the two implicitly-concatenated
        # parts of this message
        raise ValueError("Cannot specify both a hash of an existing configuration "
                         "and provide a runtime configuration")

    # if no configuration was specified we use the default one
    elif (configuration is None) and (configuration_hash is None):
        configuration = self.get_default_configuration()

    # if a configuration hash was given only then we retrieve that
    # configuration since we must pass configurations to the
    # checkpoint DB initialization
    elif configuration_hash is not None:
        # NOTE(review): this takes the stored value straight from the
        # KV -- confirm whether a deserialize step is needed here
        configuration = self.configuration_kv[configuration_hash]

    # check that the directory for checkpoints exists, and create
    # it if it doesn't and isn't already created
    if checkpoint_dir is not None:
        checkpoint_dir = osp.realpath(checkpoint_dir)
        os.makedirs(checkpoint_dir, exist_ok=True)

    # if the checkpoint dir is not specified don't create a
    # checkpoint db orch
    checkpoint_db_path = None
    if checkpoint_dir is not None:
        logging.debug("Initialization of checkpoint database is requested")
        checkpoint_db_path, configuration_hash = self._init_checkpoint_db(start_hash,
                                                                          configuration,
                                                                          checkpoint_dir,
                                                                          mode=checkpoint_mode)
        logging.debug("finished initializing checkpoint database")

    # get the snapshot and the configuration to use for the sim_manager
    start_snapshot = self.get_snapshot(start_hash)

    # generate the simulation manager given the snapshot and the
    # configuration
    sim_manager = self.gen_sim_manager(start_snapshot, configuration)

    # handle and process the optional arguments for running simulation
    if 'runner' in configuration.apparatus_opts:
        runner_opts = configuration.apparatus_opts['runner']
    else:
        runner_opts = None

    # run the init subroutine for the simulation manager
    logging.debug("Running sim_manager.init")
    sim_manager.init()

    # run each cycle manually creating checkpoints when necessary
    logging.debug("Starting run loop")
    walkers = sim_manager.init_walkers
    cycle_idx = 0
    start_time = time.time()

    # NOTE(review): if run_time is so short that no cycle runs,
    # `filters` below is unbound -- confirm callers always allow at
    # least one cycle
    while time.time() - start_time < run_time:

        logging.debug("Running cycle {}".format(cycle_idx))
        # run the cycle
        walkers, filters = sim_manager.run_cycle(
            walkers,
            n_steps,
            cycle_idx,
            runner_opts=runner_opts,
        )

        # check to see if a checkpoint is necessary
        if (checkpoint_freq is not None):
            if (cycle_idx % checkpoint_freq == 0):
                logging.debug("Checkpoint is required for this cycle")

                # make the checkpoint snapshot
                logging.debug("Generating the simulation snapshot")
                checkpoint_snapshot = SimSnapshot(walkers, SimApparatus(filters))

                # save the checkpoint (however that is implemented)
                logging.debug("saving the checkpoint to the database")
                self._save_checkpoint(checkpoint_snapshot,
                                      configuration_hash,
                                      checkpoint_db_path,
                                      cycle_idx)
                logging.debug("finished saving the checkpoint to the database")

        # increase the cycle index for the next cycle
        cycle_idx += 1

    logging.debug("Finished the run cycle")

    # the cycle index was set for the next cycle which didn't run
    # so we decrement it
    last_cycle_idx = cycle_idx - 1

    logging.debug("Running sim_manager.cleanup")
    # run the cleanup subroutine
    sim_manager.cleanup()

    # build the final snapshot from the last cycle's walkers/filters
    end_snapshot = SimSnapshot(walkers, SimApparatus(filters))

    logging.debug("Run finished")

    # return the things necessary for saving to the checkpoint if
    # that is what is wanted later on
    return end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx
def orchestrate_snapshot_run_by_time(self, snapshot_hash, run_time, n_steps,
                                     checkpoint_freq=None,
                                     checkpoint_dir=None,
                                     orchestrator_path=None,
                                     configuration=None,
                                     # these can reparametrize the paths
                                     # for both the orchestrator produced
                                     # files as well as the configuration
                                     work_dir=None,
                                     config_name=None,
                                     narration=None,
                                     mode=None,
                                     # extra kwargs will be passed to the
                                     # configuration.reparametrize method
                                     **kwargs):
    """Run a snapshot for a wall-clock amount of time with checkpointing,
    returning an orchestrator connected to the checkpoint database.

    Parameters
    ----------
    snapshot_hash :
        Hash of the snapshot to start the run from.
    run_time :
        Wall-clock time to run for, in seconds.
    n_steps :
        Number of dynamics steps per cycle.
    checkpoint_freq :
        Save a checkpoint every this many cycles. (Default value = None)
    checkpoint_dir :
        Directory for the checkpoint database. (Default value = None)
    orchestrator_path :
        Path for the orchestrator file. (Default value = None)
    configuration :
        Explicit runtime configuration. (Default value = None)
    work_dir, config_name, narration, mode :
        Path reparametrization options for both the orchestrator
        produced files and the configuration. (Default value = None)
    **kwargs :
        Extra kwargs passed to ``configuration.reparametrize``.

    Returns
    -------
    checkpoint_orch
        Orchestrator connected to the checkpoint database.
    """

    # the checkpoint/orchestration database must always be written in
    # bytes mode, independent of the path-parameter `mode`; the
    # original only bound orch_mode when `mode` was None, leaving it
    # unbound (NameError) otherwise
    orch_mode = self.DEFAULT_ORCHESTRATION_MODE

    # there are two possible uses for the path reparametrizations:
    # the configuration and the orchestrator file paths. If both
    # of those are explicitly specified by passing in the whole
    # configuration object or both of checkpoint_dir,
    # orchestrator_path then those reparametrization kwargs will
    # not be used. As this is likely not the intention of the user
    # we will raise an error. If there is even one use for them no
    # error will be raised.

    # first check if any reparametrizations were even requested
    parametrizations_requested = (work_dir is not None,
                                  config_name is not None,
                                  narration is not None,
                                  mode is not None,)

    # check if there are any available targets for reparametrization
    reparametrization_targets = (configuration is None,
                                 checkpoint_dir is None,
                                 orchestrator_path is None)

    # if parametrizations were requested and there are no targets
    # we need to raise an error
    if any(parametrizations_requested) and not any(reparametrization_targets):
        raise OrchestratorError("Reparametrizations were requested but none are possible,"
                                " due to all possible targets being already explicitly given")

    # if any paths were not given and no defaults for path
    # parameters we want to fill in the defaults for them. This
    # will also fill in any missing parametrizations with defaults
    if work_dir is None:
        work_dir = self.DEFAULT_WORKDIR
    if config_name is None:
        config_name = self.DEFAULT_CONFIG_NAME
    if narration is None:
        narration = self.DEFAULT_NARRATION
    if mode is None:
        mode = self.DEFAULT_MODE

    # if no configuration was specified use the default one
    if configuration is None:
        configuration = self.get_default_configuration()

    # reparametrize the configuration with the given path
    # parameters and anything else in kwargs. If they are none
    # this will have no effect anyhow
    logging.debug("Reparametrizing the configuration")
    configuration = configuration.reparametrize(work_dir=work_dir,
                                                config_name=config_name,
                                                narration=narration,
                                                mode=mode,
                                                **kwargs)

    # make parametric paths for the checkpoint directory, unless it is
    # explicitly given
    if checkpoint_dir is None:
        # the checkpoint directory will be in the work dir
        logging.debug("checkpoint directory defaulted to the work_dir")
        checkpoint_dir = work_dir

    logging.debug("In the orchestrate run, calling to run_snapshot by time")
    # then actually run the simulation with checkpointing. This
    # returns the end snapshot and doesn't write out anything to
    # orchestrators other than the checkpointing
    (end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx) = \
        self.run_snapshot_by_time(snapshot_hash, run_time, n_steps,
                                  checkpoint_freq=checkpoint_freq,
                                  checkpoint_dir=checkpoint_dir,
                                  configuration=configuration,
                                  checkpoint_mode=orch_mode)
    logging.debug("Finished running snapshot by time")

    # if the last cycle in the run was a checkpoint skip this step
    # of saving a checkpoint
    do_final_checkpoint = True

    # make sure the checkpoint_freq is defined before testing it
    if checkpoint_freq is not None:
        # a cycle saved a checkpoint when its index is a multiple of
        # the frequency; the operands were reversed here before
        # (checkpoint_freq % last_cycle_idx), which also divided by
        # zero when the run ended on cycle 0
        if last_cycle_idx % checkpoint_freq == 0:
            logging.debug("Last cycle saved a checkpoint, no need to save one")
            do_final_checkpoint = False

    if do_final_checkpoint:
        logging.debug("Saving a final checkpoint for the end of the run")
        # now that it is finished we save the final snapshot to the
        # checkpoint file. This is done transactionally using the
        # SQLite transaction functionality (either succeeds or doesn't
        # happen) that way we don't have worry about data integrity
        # loss. Here we also don't have to worry about other processes
        # interacting with the checkpoint which makes it isolated.
        self._save_checkpoint(end_snapshot, configuration_hash,
                              checkpoint_db_path, last_cycle_idx)
        logging.debug("Finished saving the final checkpoint for the run")

    # then return the final orchestrator
    logging.debug("Getting a connection to that orch to retun")
    checkpoint_orch = Orchestrator(checkpoint_db_path,
                                   mode='r+',
                                   append_only=True)

    return checkpoint_orch
def reconcile_orchestrators(host_path, *orchestrator_paths):
    """Merge the snapshots, configurations, and run records of several
    orchestrator databases into the one at `host_path`.

    Parameters
    ----------
    host_path :
        Path of the orchestrator database that receives everything;
        created if it does not already exist.
    *orchestrator_paths :
        Paths of the orchestrator databases to merge in.

    Returns
    -------
    new_orch
        Orchestrator connected to the merged database at `host_path`.
    """

    if not osp.exists(host_path):
        assert len(orchestrator_paths) > 1, \
            "If the host path is a new orchestrator, must give at least 2 orchestrators to merge."

    # open the host orchestrator at the location which will have all
    # of the new things put into it from the other orchestrators. If
    # it doesn't already exist it will be created otherwise open
    # read-write.
    new_orch = Orchestrator(orch_path=host_path,
                            mode='a',
                            append_only=True)

    # TODO deprecate: defaults (sim_apparatus, init_walkers) cannot be
    # set in append-only mode and are not copied here.

    for orch_path in orchestrator_paths:

        # open it in read-write fail if doesn't exist
        orch = Orchestrator(orch_path=orch_path,
                            mode='r+',
                            append_only=True)

        # add in all snapshots from each orchestrator, by the hash not
        # the snapshots themselves; we trust they are correct. Skip
        # hashes the host already has and copy without deserializing.
        for snaphash in orch.snapshot_hashes:
            if snaphash in new_orch.snapshot_hashes:
                continue
            new_orch.snapshot_kv[snaphash] = orch.snapshot_kv[snaphash]

        # add in the configurations for the runs from each
        # orchestrator, also by hash
        for run_id in orch.run_hashes():
            config_hash = orch.run_configuration_hash(*run_id)
            if config_hash in new_orch.configuration_hashes:
                continue
            new_orch.configuration_kv[config_hash] = orch.configuration_kv[config_hash]

        # concatenate the run table with an SQL union from an attached
        # database
        attached_table_name = "other"

        # query to attach the foreign database
        attach_query = """
        ATTACH '{}' AS {}
        """.format(orch_path, attached_table_name)

        # query to update the runs table with new unique runs
        union_query = """
        INSERT INTO runs
        SELECT * FROM (
        SELECT * FROM {}.runs
        EXCEPT
        SELECT * FROM runs
        )
        """.format(attached_table_name)

        # query to detach the attached database again
        detach_query = """
        DETACH {}
        """.format(attached_table_name)

        # then run the queries
        cursor = new_orch._db.cursor()
        try:
            cursor.execute('BEGIN TRANSACTION')
            cursor.execute(attach_query)
            cursor.execute(union_query)
            cursor.execute('COMMIT')
            cursor.execute(detach_query)
        except Exception:
            # roll back the partial merge and surface the error; the
            # original committed anyway and dropped into pdb, which
            # must never ship
            cursor.execute('ROLLBACK')
            raise

    return new_orch
| 32.319713
| 125
| 0.595883
| 5,164
| 45,086
| 5.030015
| 0.114253
| 0.018364
| 0.010626
| 0.014167
| 0.334052
| 0.272608
| 0.227911
| 0.196843
| 0.154533
| 0.144485
| 0
| 0.001885
| 0.341148
| 45,086
| 1,394
| 126
| 32.342898
| 0.872547
| 0.308455
| 0
| 0.23689
| 0
| 0
| 0.124179
| 0.009513
| 0
| 0
| 0
| 0.000717
| 0.001808
| 1
| 0.099458
| false
| 0.001808
| 0.028933
| 0.014467
| 0.24774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911d2626da51dec7964f3f20d1a80f93b2a0e8f3
| 2,681
|
py
|
Python
|
src/generate_class_specific_samples.py
|
HesterLim/pytorch-cnn-visualizations
|
59ddf0ef6ea2c9d4d69c1ac6b260cb399867d178
|
[
"MIT"
] | 6,725
|
2017-10-25T08:00:25.000Z
|
2022-03-31T15:25:46.000Z
|
src/generate_class_specific_samples.py
|
woojoo99/pytorch-cnn-visualizations
|
16eddfa055a9c618ba548e9fb4529e2ccbc79c35
|
[
"MIT"
] | 105
|
2017-11-26T11:59:24.000Z
|
2022-01-11T01:37:00.000Z
|
src/generate_class_specific_samples.py
|
woojoo99/pytorch-cnn-visualizations
|
16eddfa055a9c618ba548e9fb4529e2ccbc79c35
|
[
"MIT"
] | 1,419
|
2017-10-25T08:00:27.000Z
|
2022-03-30T08:28:35.000Z
|
"""
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from misc_functions import preprocess_image, recreate_image, save_image
class ClassSpecificImageGeneration():
    """
    Produces an image that maximizes a certain class with gradient ascent
    """

    def __init__(self, model, target_class):
        # inverse-normalization constants (presumably the ImageNet
        # mean/std in the form recreate_image expects -- confirm
        # against misc_functions)
        self.mean = [-0.485, -0.456, -0.406]
        self.std = [1/0.229, 1/0.224, 1/0.225]
        self.model = model
        self.model.eval()
        self.target_class = target_class
        # start the optimization from uniform random noise in [0, 255)
        self.created_image = np.uint8(np.random.uniform(0, 255, (224, 224, 3)))
        # make sure the per-class output folder exists
        if not os.path.exists('../generated/class_'+str(self.target_class)):
            os.makedirs('../generated/class_'+str(self.target_class))

    def generate(self, iterations=150):
        """Generates class specific image

        Keyword Arguments:
            iterations {int} -- Total iterations for gradient ascent (default: {150})

        Returns:
            np.ndarray -- Final maximally activated class image
        """
        initial_learning_rate = 6
        for i in range(1, iterations):
            # re-wrap the current image as a fresh differentiable input
            self.processed_image = preprocess_image(self.created_image, False)
            # a new optimizer each step, since the input tensor is new
            optimizer = SGD([self.processed_image], lr=initial_learning_rate)
            # forward pass through the frozen model
            output = self.model(self.processed_image)
            # maximize the target logit by minimizing its negation
            class_loss = -output[0, self.target_class]

            if i % 10 == 0 or i == iterations-1:
                print('Iteration:', str(i), 'Loss',
                      "{0:.2f}".format(class_loss.data.numpy()))

            # zero grads, backprop, and take one ascent step
            self.model.zero_grad()
            class_loss.backward()
            optimizer.step()
            # convert back to a uint8 image for the next round
            self.created_image = recreate_image(self.processed_image)
            if i % 10 == 0 or i == iterations-1:
                # snapshot the intermediate result every 10 iterations
                im_path = '../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
                save_image(self.created_image, im_path)
        return self.processed_image
if __name__ == '__main__':
    # ImageNet class to maximize
    target_class = 130  # Flamingo
    # NOTE: downloads the pretrained AlexNet weights on first use
    pretrained_model = models.alexnet(pretrained=True)
    csig = ClassSpecificImageGeneration(pretrained_model, target_class)
    csig.generate()
| 34.818182
| 125
| 0.613577
| 322
| 2,681
| 4.934783
| 0.425466
| 0.069226
| 0.056639
| 0.052863
| 0.085588
| 0.085588
| 0.025173
| 0.025173
| 0
| 0
| 0
| 0.037902
| 0.281611
| 2,681
| 76
| 126
| 35.276316
| 0.787124
| 0.218202
| 0
| 0.051282
| 0
| 0
| 0.049205
| 0
| 0.025641
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.153846
| 0
| 0.25641
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911d31b9a8a7937bf3f3cbbfb6a83e53d58e13d7
| 16,673
|
py
|
Python
|
sumo/tools/net/visum_mapDistricts.py
|
iltempe/osmosi
|
c0f54ecdbb7c7b5602d587768617d0dc50f1d75d
|
[
"MIT"
] | null | null | null |
sumo/tools/net/visum_mapDistricts.py
|
iltempe/osmosi
|
c0f54ecdbb7c7b5602d587768617d0dc50f1d75d
|
[
"MIT"
] | null | null | null |
sumo/tools/net/visum_mapDistricts.py
|
iltempe/osmosi
|
c0f54ecdbb7c7b5602d587768617d0dc50f1d75d
|
[
"MIT"
] | 2
|
2017-12-14T16:41:59.000Z
|
2020-10-16T17:51:27.000Z
|
#!/usr/bin/env python
"""
@file visum_mapDistricts.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id$
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import math
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
import netshiftadaptor
def computeDistance(n1, n2):
    """Return the Euclidean distance between the coordinates of two nodes."""
    dx = n1._coord[0] - n2._coord[0]
    dy = n1._coord[1] - n2._coord[1]
    return math.sqrt(dx * dx + dy * dy)
def relAngle(angle1, angle2):
    """Return the signed relative angle from angle1 to angle2 in degrees,
    normalized into (-180, 180].

    The original wrapped the positive side with a single ``if`` (so a
    difference above 540 degrees stayed out of range) while the
    negative side used a ``while``; both directions now wrap fully.
    """
    rel = angle2 - angle1
    while rel > 180:
        rel -= 360.
    while rel < -180:
        rel += 360.
    return rel
# initialise the command line interface
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                     default=False, help="tell me what you are doing")
# i/o options: the two networks and the matching node lists
optParser.add_option("-1", "--net1", dest="net1",
                     help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
                     help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
                     help="The first matching nodes", metavar="NODELIST")
optParser.add_option("-b", "--nodes2", dest="nodes2",
                     help="The second matching nodes", metavar="NODELIST")
# parse options
(options, args) = optParser.parse_args()

# read both networks from disk
if options.verbose:
    print("Reading net#1...")
net1 = sumolib.net.readNet(options.net1)
if options.verbose:
    print("Reading net#2...")
net2 = sumolib.net.readNet(options.net2)

# reproject the visum net onto the navteq net using the matching nodes
adaptor = netshiftadaptor.NetShiftAdaptor(
    net1, net2, options.nodes1.split(","), options.nodes2.split(","))
adaptor.reproject(options.verbose)
# build a speed-up grid: bounding box over the nodes of both nets,
# then a CELLSIZE x CELLSIZE bucket grid for fast spatial lookups
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
for n in net1._nodes:
    xmin = min(xmin, n._coord[0])
    xmax = max(xmax, n._coord[0])
    ymin = min(ymin, n._coord[1])
    ymax = max(ymax, n._coord[1])
for n in net2._nodes:
    xmin = min(xmin, n._coord[0])
    xmax = max(xmax, n._coord[0])
    ymin = min(ymin, n._coord[1])
    ymax = max(ymax, n._coord[1])
# pad the bounding box slightly so border nodes land inside it
xmin = xmin - .1
xmax = xmax + .1
ymin = ymin - .1
ymax = ymax + .1

CELLSIZE = 100
arr1 = []
arr2 = []
for y in range(0, CELLSIZE):
    arr1.append([])
    arr2.append([])
    for x in range(0, CELLSIZE):
        arr1[-1].append([])
        arr2[-1].append([])

# cell width/height in network coordinates
cw = (xmax - xmin) / float(CELLSIZE)
ch = (ymax - ymin) / float(CELLSIZE)

# bucket net2's nodes into arr1 and net1's nodes into arr2
for n in net2._nodes:
    cx = (n._coord[0] - xmin) / cw
    cy = (n._coord[1] - ymin) / ch
    arr1[int(cy)][int(cx)].append(n)
for n in net1._nodes:
    cx = (n._coord[0] - xmin) / cw
    cy = (n._coord[1] - ymin) / ch
    arr2[int(cy)][int(cx)].append(n)
# map: classify net1's nodes into highway sinks/sources and urban nodes
nmap1to2 = {}
nmap2to1 = {}
nodes1 = net2._nodes
nodes2 = net1._nodes
highwayNodes2 = set()
highwaySinks2 = set()
highwaySources2 = set()
urbanNodes2 = set()
for n2 in nodes2:
    noIncoming = 0
    noOutgoing = 0
    # an edge between ~80 km/h and 99 (the connector-speed sentinel)
    # marks the node as a highway node; edges below 99 count as real
    for e in n2._outgoing:
        if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
            highwayNodes2.add(n2)
        if e.getSpeed() < 99:
            noOutgoing = noOutgoing + 1
    for e in n2._incoming:
        if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
            highwayNodes2.add(n2)
        if e.getSpeed() < 99:
            noIncoming = noIncoming + 1
    if n2 in highwayNodes2:
        if noOutgoing == 0:
            highwaySinks2.add(n2)
        if noIncoming == 0:
            highwaySources2.add(n2)
    else:
        urbanNodes2.add(n2)

# report what was found
print("Found " + str(len(highwaySinks2)) + " highway sinks in net2")
cont = ""
for n in highwaySinks2:
    cont = cont + n._id + ", "
print(cont)
cont = ""
print("Found " + str(len(highwaySources2)) + " highway sources in net2")
for n in highwaySources2:
    cont = cont + n._id + ", "
print(cont)

# open the connections output file and prepare the matching state
fdd = open("dconns.con.xml", "w")
fdd.write("<connections>\n")
highwaySinks1 = set()
highwaySources1 = set()
origDistrictNodes = {}
nnn = {}
for n1 in nodes1:
if n1._id.find('-', 1) < 0:
continue
# if n1._id.find("38208387")<0:
# continue
un1 = None
for e in n1._outgoing:
un1 = e._to
for e in n1._incoming:
un1 = e._from
d = n1._id[:n1._id.find('-', 1)]
if d[0] == '-':
d = d[1:]
if d not in origDistrictNodes:
origDistrictNodes[d] = []
if options.verbose:
print("District: " + d)
isHighwayNode = False
isHighwaySink = False
isHighwaySource = False
noIncoming = 0
noOutgoing = 0
noInConns = 0
noOutConns = 0
for e in un1._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
if e.getSpeed() > 99:
noOutConns = noOutConns + 1
for e in un1._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if e.getSpeed() > 99:
noInConns = noInConns + 1
if options.verbose:
print("Check", un1._id, noOutgoing, noIncoming)
if isHighwayNode:
if noOutgoing == 0:
highwaySinks1.add(n1)
isHighwaySink = True
if noIncoming == 0:
highwaySources1.add(n1)
isHighwaySource = True
# the next is a hack for bad visum-networks
if noIncoming == 1 and noOutgoing == 1 and noInConns == 1 and noOutConns == 1:
highwaySinks1.add(n1)
isHighwaySink = True
highwaySources1.add(n1)
isHighwaySource = True
best = None
bestDist = -1
check = urbanNodes2
if n1 in highwaySinks1:
check = highwaySinks2
elif n1 in highwaySources1:
check = highwaySources2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if bestDist == -1 or bestDist > dist:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
if options.verbose:
print("a: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
preBest = best
best = None
bestDist = -1
check = []
if n1 in highwaySinks1 or preBest in highwaySinks2:
check = highwaySources2
elif n1 in highwaySources1 or preBest in highwaySources2:
check = highwaySinks2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if (bestDist == -1 or bestDist > dist) and n2 != preBest:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
print("b: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
if options.verbose:
print("Found " + str(len(highwaySinks1)) + " highway sinks in net1")
for n in highwaySinks1:
print(n._id)
print("Found " + str(len(highwaySources1)) + " highway sources in net1")
for n in highwaySources1:
print(n._id)
connectedNodesConnections = {}
for d in nmap1to2:
for n2 in nmap1to2[d]:
if n2 in connectedNodesConnections:
continue
n1i = net1.addNode("i" + n2._id, nnn[n2]._coord)
n1o = net1.addNode("o" + n2._id, nnn[n2]._coord)
haveIncoming = False
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
haveIncoming = True
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
haveOutgoing = False
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
haveOutgoing = True
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
if haveIncoming:
e1 = net1.addEdge("o" + n2._id, n2._id, n1o._id, -2)
if haveOutgoing:
net1.addLane(e1, 20, 100.)
else:
for i in range(0, incomingLaneNo):
net1.addLane(e1, 20, 100.)
if len(n2._incoming) == 1:
fdd.write(' <connection from="' + n2._incoming[
0]._id + '" to="' + e1._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
if haveOutgoing:
if options.verbose:
print("has outgoing")
e2 = net1.addEdge("i" + n2._id, n1i._id, n2._id, -2)
if haveIncoming:
net1.addLane(e2, 20, 100.)
else:
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 20, 100.)
if len(n2._outgoing) == 1:
fdd.write(' <connection from="' + e2._id + '" to="' +
n2._outgoing[0]._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
connectedNodesConnections[n2] = [n1i, n1o]
newDistricts = {}
districtSources = {}
districtSinks = {}
mappedDistrictNodes = {}
connNodes = {}
dRemap = {}
for d in nmap1to2:
newDistricts[d] = []
if len(nmap1to2[d]) == 1:
n = nmap1to2[d][0]
if n in dRemap:
districtSources[d] = districtSources[dRemap[n]]
districtSinks[d] = districtSinks[dRemap[n]]
newDistricts[d] = []
newDistricts[d].append(n._id)
continue
else:
dRemap[n] = d
[ni, no] = connectedNodesConnections[n]
if len(ni._outgoing) > 0:
districtSources[d] = ni._outgoing[0]._id
if len(no._incoming) > 0:
districtSinks[d] = no._incoming[0]._id
fdd.write(' <connection from="' + no._incoming[0]._id + '"/>\n')
else:
incomingLaneNoG = 0
outgoingLaneNoG = 0
for n in nmap1to2[d]:
for e in n._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNoG = incomingLaneNoG + e.getLaneNumber()
for e in n._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNoG = outgoingLaneNoG + e.getLaneNumber()
p1 = [0, 0]
p11 = [0, 0]
p12 = [0, 0]
p2 = [0, 0]
for n in nmap1to2[d]:
p1[0] = p1[0] + n._coord[0]
p1[1] = p1[1] + n._coord[1]
p2[0] = p2[0] + nnn[n]._coord[0]
p2[1] = p2[1] + nnn[n]._coord[1]
p2[0] = (p1[0] + p2[0]) / float(len(origDistrictNodes[d]) * 2)
p2[1] = (p1[1] + p2[1]) / float(len(origDistrictNodes[d]) * 2)
dn2i = net1.addNode("cci" + d, p2)
dn2o = net1.addNode("cci" + d, p2)
p11[0] = p1[0] / float(len(origDistrictNodes[d]))
p11[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1o = net1.addNode("co" + d, p11)
e1 = net1.addEdge("co" + d, dn1o._id, dn2o._id, -2)
for i in range(0, incomingLaneNoG):
net1.addLane(e1, 22, 100.)
districtSinks[d] = e1._id
p12[0] = p1[0] / float(len(origDistrictNodes[d]))
p12[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1i = net1.addNode("ci" + d, p12)
e2 = net1.addEdge("ci" + d, dn2i._id, dn1i._id, -2)
for i in range(0, outgoingLaneNoG):
net1.addLane(e2, 21, 100.)
districtSources[d] = e2._id
runningOutLaneNumber = 0
runningInLaneNumber = 0
for n2 in nmap1to2[d]:
[ni, no] = connectedNodesConnections[n2]
print("In: " + ni._id + " " + str(len(ni._incoming)) +
" " + str(len(ni._outgoing)))
print("Out: " + no._id + " " + str(len(no._incoming)) +
" " + str(len(no._outgoing)))
if len(no._incoming) > 0:
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
e1 = net1.addEdge("o" + d + "#" + n2._id, no._id, dn1o._id, -2)
for i in range(0, incomingLaneNo):
net1.addLane(e1, 19, 100.)
fdd.write(' <connection from="' + "o" + d + "#" + n2._id + '" to="' + dn1o._outgoing[
0]._id + '" lane="' + str(i) + ':' + str(runningOutLaneNumber) + '"/>\n')
runningOutLaneNumber = runningOutLaneNumber + 1
fdd.write(
' <connection from="' + dn1o._outgoing[0]._id + '"/>\n')
if incomingLaneNo == 0:
net1.addLane(e1, 19, 100.)
runningOutLaneNumber = runningOutLaneNumber + 1
if len(ni._outgoing) > 0:
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
e2 = net1.addEdge("i" + d + "#" + n2._id, dn1i._id, ni._id, -2)
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 18, 100.)
fdd.write(' <connection from="' + dn1i._incoming[
0]._id + '" to="' + "i" + d + "#" + n2._id + '" lane="' + str(runningInLaneNumber) + ':' + str(i) + '"/>\n')
runningInLaneNumber = runningInLaneNumber + 1
if outgoingLaneNo == 0:
net1.addLane(e2, 18, 100.)
runningInLaneNumber = runningInLaneNumber + 1
# write the district (TAZ) definitions with their source/sink edges
fd = open("districts.xml", "w")
fd.write("<tazs>\n")
for d in newDistricts:
    fd.write(' <taz id="' + d + '">\n')
    if d in districtSources:
        fd.write(
            ' <tazSource id="' + districtSources[d] + '" weight="1"/>\n')
    if d in districtSinks:
        fd.write(
            ' <tazSink id="' + districtSinks[d] + '" weight="1"/>\n')
    fd.write(' </taz>\n')
fd.write("</tazs>\n")
fd.close()
def writeNode(fd, node):
    """Write one plain-XML <node> element for *node* to the stream *fd*."""
    x_val = str(node._coord[0])
    y_val = str(node._coord[1])
    fd.write(' <node id="' + node._id + '" x="' + x_val + '" y="' + y_val + '"/>\n')
def writeEdge(fd, edge, withGeom=True):
    """Write one plain-XML <edge> element for *edge* to *fd*.

    When *withGeom* is true, the spreadType attribute and the edge's
    shape coordinates are included; otherwise both are omitted.
    """
    header = ' <edge id="' + edge._id + '" fromNode="' + edge._from._id + '" toNode="' + edge._to._id
    header += '" speed="' + str(edge._speed)
    header += '" priority="' + str(edge._priority)
    if withGeom:
        header += '" spreadType="center'
    header += '" numLanes="' + str(len(edge._lanes)) + '"'
    fd.write(header)
    # getShape() is called unconditionally, mirroring the original flow
    shape = edge.getShape()
    if withGeom:
        coords = " ".join(str(pt[0]) + "," + str(pt[1]) for pt in shape)
        fd.write(' shape="' + coords + '"')
    fd.write("/>\n")
def writeNodes(net):
    """Dump every node of *net* into a plain-XML file named nodes.xml."""
    with open("nodes.xml", "w") as out:
        out.write("<nodes>\n")
        for current in net._nodes:
            writeNode(out, current)
        out.write("</nodes>\n")
def writeEdges(net):
    """Dump every edge of *net* into edges.xml.

    Edges whose id contains '#' (past position 0), 'c' or 'i' are the
    generated connector edges and are written without geometry.
    """
    with open("edges.xml", "w") as out:
        out.write("<edges>\n")
        for current in net._edges:
            eid = current._id
            is_connector = eid.find("#") > 0 or eid.find("c") >= 0 or eid.find("i") >= 0
            writeEdge(out, current, not is_connector)
        out.write("</edges>\n")
# Finalize the outputs: close the <connections> XML listing that was built
# above, then dump the modified network to plain-XML node/edge files.
fdd.write("</connections>\n")
writeNodes(net1)
writeEdges(net1)
| 33.346
| 138
| 0.537156
| 2,038
| 16,673
| 4.317959
| 0.153091
| 0.016705
| 0.008182
| 0.016705
| 0.349432
| 0.245682
| 0.226591
| 0.19625
| 0.187955
| 0.170227
| 0
| 0.054722
| 0.31608
| 16,673
| 499
| 139
| 33.412826
| 0.717004
| 0.051101
| 0
| 0.417249
| 0
| 0
| 0.066759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013986
| false
| 0
| 0.018648
| 0
| 0.037296
| 0.041958
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911d404601c245497e0b927e48a8d554d335993b
| 42,222
|
py
|
Python
|
BKPMediaDetector.py
|
bkpifc/BKPMediaDetector
|
51858b45e218e0c4b5ed4d6aac6d751e029d850e
|
[
"Apache-2.0"
] | 5
|
2019-04-03T08:04:06.000Z
|
2019-10-01T12:08:30.000Z
|
BKPMediaDetector.py
|
bkpifc/BKPMediaDetector
|
51858b45e218e0c4b5ed4d6aac6d751e029d850e
|
[
"Apache-2.0"
] | 13
|
2019-04-08T14:24:15.000Z
|
2022-03-11T23:50:32.000Z
|
BKPMediaDetector.py
|
bkpifc/BKPMediaDetector
|
51858b45e218e0c4b5ed4d6aac6d751e029d850e
|
[
"Apache-2.0"
] | 2
|
2019-04-04T11:20:27.000Z
|
2019-04-04T14:51:11.000Z
|
#!/usr/bin/env python3
######
# General Detector
# 06.12.2018 / Last Update: 20.05.2021
# LRB
######
import numpy as np
import os
import sys
import tensorflow as tf
import hashlib
import cv2
import magic
import PySimpleGUI as sg
import csv
import imagehash
import face_recognition
import subprocess
from itertools import groupby
from distutils.version import StrictVersion
from PIL import Image
from datetime import datetime
from time import strftime
from time import gmtime
from multiprocessing import Pool
from Models.Face import detect_face
from pathlib import Path
from openvino.inference_engine import IENetwork, IECore
from AudioAnalysis import audioAnalysis
######
# Worker function to check the input provided via the GUI
#######
def validateInput(gui_input):
    """Validate the GUI result tuple and abort with a popup if invalid.

    gui_input[0] is the pressed button, gui_input[1] the field values.
    Required: at least one output format selected (index 8) and, when
    face recognition (index 5) is enabled, a known-faces folder (index 12).
    Exits the process on error; returns None otherwise.
    """
    error = False
    # User cancelled the dialog or picked no output format
    if gui_input[0] == "Cancel" or len(gui_input[1][8]) == 0:
        error = True
    # Face recognition requested but no known-faces folder given
    # (idiom fix: was "bool(x) == True and y == ''")
    if gui_input[1][5] and gui_input[1][12] == "":
        error = True
    if error:
        sg.Popup('You have not populated all required fields. Aborting!', title='Error', button_color=('black', 'red'), background_color=('grey'))
        exit()
######
# Worker function to update the progress bar
######
def updateProgressMeter(step, customText):
    """Advance the 12-step GUI progress meter; exit if the user cancelled it."""
    still_running = sg.OneLineProgressMeter('BKP Media Detector', step, 12, 'key', customText, orientation='h', size=(50, 25))
    if not still_running:
        exit()
######
# Worker function to prepare and reshape the input images into a Numpy array
# and to calculate the MD5 hashes of them.
######
def load_image_into_numpy_array(image_path):
    """Load an image file into an RGB numpy array and MD5-hash the file.

    Returns:
        (path, md5_hexdigest, np_array) on success;
        (path_or_message, flag) where flag is "VIDEO"/"AUDIO"/"OCTET"/"ERROR"
        when the file is not a readable image;
        None for undersized images or unexpected errors (both logged).
    """
    try:
        image_path = str(image_path)
        # Open, measure and convert image to RGB channels
        image = Image.open(image_path)
        (im_width, im_height) = image.size
        if int(im_width) < 34 or int(im_height) < 34:
            logfile.write("Insufficient file dimensions: " + str(image_path) + "\n")
            return None
        if int(im_width) > 4512 or int(im_height) > 3008:
            # Downscale oversized images while keeping the aspect ratio
            maxheight = int(3008)
            maxwidth = int(4512)
            resize_ratio = min(maxwidth / im_width, maxheight / im_height)
            im_width = int(im_width * resize_ratio)
            im_height = int(im_height * resize_ratio)
            image = image.resize((im_width, im_height))
        image = image.convert('RGB')
        np_array = np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
        image.close()
        # Hash the image in byte-chunks of 4096
        # (redundant f.close() inside the with-block removed)
        hash_md5 = hashlib.md5()
        with open(image_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        hashvalue = hash_md5.hexdigest()
        return image_path, hashvalue, np_array
    # BUGFIX: was "except IOError or OSError:", which evaluates to
    # "except IOError:" only; a tuple catches both classes.
    except (IOError, OSError):
        magictype = str(magic.from_file((image_path), mime=True))
        # If image file cannot be read, check if it is a video
        if magictype[:5] == 'video':
            # If so, return a video flag instead of numpy array
            flag = "VIDEO"
        elif magictype[:5] == 'audio':
            flag = "AUDIO"
        elif magictype[12:17] == 'octet':
            flag = "OCTET"
        else:
            image_path = "Could not open file: " + str(image_path) + " (" + str(magictype) + ")\n"
            flag = "ERROR"
        return image_path, flag
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        magictype = str(magic.from_file((image_path), mime=True))
        logfile.write("General error with file: " + str(image_path) + " (" + str(magictype) + ")\n")
def check_video_orientation(image_path):
    """Query a video's rotation tag via ffprobe and map it to a CV2 rotate code.

    Returns 3 when no rotation tag is present (caller treats 3 as
    "no rotation"), 0 for 90 degrees, 1 for 180 degrees, 2 otherwise,
    or None when ffprobe fails (logged).
    """
    try:
        cmnd = ['ffprobe', '-loglevel', 'error', '-select_streams', 'v:0', '-show_entries', 'stream_tags=rotate', '-of',
                'default=nw=1:nk=1', image_path]
        p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        orientation = out.decode('utf-8')
        if orientation == '':
            rotation = 3
        elif int(orientation) == 180:
            rotation = 1
        elif int(orientation) == 90:
            rotation = 0
        else:
            rotation = 2
        return rotation
    except Exception:
        # Narrowed from a bare except: ffprobe may be missing or the output
        # unparsable, but KeyboardInterrupt/SystemExit must propagate.
        logfile.write("Cannot determine video rotation: " + str(image_path) + "\n")
######
# Worker function to prepare and reshape the input videos to a Numpy array
# and to calculate the MD5 hashes of them.
# The function analyzes as much frames as indicated in the variable "frames_per_second" (Default = 0.5)
######
def load_video_into_numpy_array(image_path):
    """Extract sampled frames from a video as ("path;timecode", md5, array) tuples.

    Samples frames according to the module-level frames_per_second rate,
    capped at max_frames_per_video. When video_sensitivity > 0, frames
    perceptually similar to their predecessor (phash distance) are dropped.
    Returns the list of frame tuples, or an error string on failure.
    """
    videoframes = []
    old_hash = None
    # Loading the video via the OpenCV framework
    try:
        rotation = check_video_orientation(image_path)
        vidcap = cv2.VideoCapture(image_path)
        im_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        im_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Switch height/width if video is to be rotated 90/270 degrees
        if rotation == 0 or rotation == 2:
            im_width, im_height = im_height, im_width
        # Calculating frames per second, total frame count and analyze rate
        fps = int(vidcap.get(cv2.CAP_PROP_FPS))
        framecount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        analyze_rate = int(framecount / fps * frames_per_second)
        if analyze_rate >= int(max_frames_per_video):
            analyze_rate = int(max_frames_per_video)  # Limiting maximum frames per video
        elif analyze_rate <= 0:
            # BUGFIX: the original first branch was a no-op "int(analyze_rate)"
            videoerror = 'Unable to extract frames from video: ' + str(image_path) + '\n'
            return videoerror
        # Hashing the video once
        hash_md5 = hashlib.md5()
        with open(image_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        hashvalue = hash_md5.hexdigest()
        # Extracting the frames from the video
        for percentile in range(0, analyze_rate):
            vidcap.set(cv2.CAP_PROP_POS_FRAMES, (framecount / analyze_rate) * percentile)
            success, extracted_frame = vidcap.read()
            if rotation != 3:
                extracted_frame = cv2.rotate(extracted_frame, rotation)
            extracted_frame = cv2.cvtColor(extracted_frame, cv2.COLOR_BGR2RGB)
            timecode = ((framecount / analyze_rate) * percentile) / fps
            timecode = str(strftime("%H:%M:%S", gmtime(timecode)))
            # And reshape them into a numpy array
            np_array = np.array(extracted_frame).reshape(
                (im_height, im_width, 3)).astype(np.uint8)
            if video_sensitivity > 0:
                # Compare the frame with the previous one for similarity, and drop if similar
                frame_to_check = Image.fromarray(np_array)
                new_hash = imagehash.phash(frame_to_check)
                if old_hash is None or (new_hash - old_hash > video_sensitivity):
                    cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
                    videoframes.append(cluster)
                    old_hash = new_hash
            else:
                cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
                videoframes.append(cluster)
        vidcap.release()
        return videoframes
    except cv2.error:
        videoerror = 'Could not process video: ' + str(image_path) + '\n'
        return videoerror
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        videoerror = 'General error processing video: ' + str(image_path) + '\n'
        return videoerror
######
# Detection within loaded images with Tensorflow framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_for_multiple_images(image_paths, images, hashvalues):
    """Run every loaded TF detection graph over all images and append hits.

    For each graph in the module-level ``graphlist`` a fresh TF1 graph and
    session are built, the serialized GraphDef is imported, and each image
    is run through the detection tensors. Detections above the per-model
    limit are appended to Detection_Results.csv in either Nuix or csv
    format (chosen via the module-level REPORT_FORMAT).

    Args:
        image_paths: list of file paths, parallel to ``images``.
        images: list of numpy arrays (one per image/frame).
        hashvalues: list of MD5 hex digests, parallel to ``images``.
    """
    # Open the results file again
    detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
    detectionresults = open(str(detectionresults_path), 'a')
    for y in range(0, len(graphlist)):
        # Create TF Session with loaded graph
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            logfile.write("*" + str(datetime.now()) + ": \tStarting detection with model " + str(y + 1) + " of " + str(len(graphlist)) + "*\n")
            # Update progress indicator
            updateProgressMeter(7 + y, 'Detecting with model {}'.format(graphlist[y]))
            # Load the respective detetion graph from file
            with tf.gfile.GFile(graphlist[y], 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            # Create TF session
            with tf.Session() as sess:
                # Get handles to input and output tensors
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in [
                    'num_detections', 'detection_scores', 'detection_classes'
                ]:
                    tensor_name = key + ':0'
                    if tensor_name in all_tensor_names:
                        tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                            tensor_name)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
                # Setting the detection limit of the different models:
                # 0.5 for general models, 0.90 for the stricter ISLogo model.
                if "ISLogo" not in graphlist[y]:
                    detectionlimit = 0.5
                else:
                    detectionlimit = 0.90
                # Loading the label map of the corresponding graph
                category_index = indexlist[y]
                # Conduct actual detection within single image
                for index, image in enumerate(images):
                    updateProgressMeter(7 + y, str(graphlist[y]) + '\nFile ' + str(index) + ' of ' + str(len(images)))
                    try:
                        output_dict = sess.run(tensor_dict,
                                               feed_dict={image_tensor: np.expand_dims(image, 0)})
                        # all outputs are float32 numpy arrays, so convert types as appropriate
                        output_dict['num_detections'] = int(output_dict['num_detections'][0])
                        output_dict['detection_scores'] = output_dict['detection_scores'][0]
                        detectionhit = output_dict['num_detections']
                        output_dict['detection_classes'] = output_dict['detection_classes'][0]
                        hashvalue = hashvalues[index]
                        image_path = image_paths[index]
                        # Walk the reported detections and keep those above the limit
                        for j in range(detectionhit):
                            score = output_dict['detection_scores'][j]
                            category = category_index[output_dict['detection_classes'][j]]
                            # Validate against the preconfigured minimum detection assurance and write to result file
                            if (score >= detectionlimit):
                                scorestring = str(score)
                                if REPORT_FORMAT[0] == 'Nuix':
                                    line = ",".join([category['name'], "md5:" + hashvalue])
                                else:
                                    line = ",".join([Path(image_path).name, hashvalue, scorestring, category['name']])
                                detectionresults.write(line + "\n")
                    except tf.errors.InvalidArgumentError:
                        # Images with unexpected dimensions are skipped, not fatal
                        logfile.write("Unable to process file dimensions of file with hash: \t" + str(hashvalue) + "\n")
                logfile.write("*" + str(datetime.now()) + ": \tFinished detection with model " + str(y + 1) + "*\n")
    detectionresults.flush()
    detectionresults.close()
######
# Detect and count faces in loaded images
# Prepare and call age/gender detection once done
######
def faceDetection(image_paths, images, hashvalues):
    """Detect faces in all images via MTCNN, log counts, crop faces for
    subsequent age/gender detection.

    Writes one result line per image that contains faces, then calls
    age_gender_detection() with all cropped faces.
    """
    detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
    detectionresults = open(str(detectionresults_path), 'a')
    # Updating progress bar and logfile
    updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector')
    logfile.write("*" + str(datetime.now()) + ": \tStarting detection with face/age/gender detection model*\n")
    # Applying constants as defined in Facenet
    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    # BUGFIX: errorcount was incremented in the except handler without ever
    # being initialized, raising NameError on the first detection error.
    errorcount = 0
    # Creating different TF Session
    with tf.Session() as sess:
        # read pnet, rnet, onet models from Models/Face directory
        facemodel_path = Path('Models/Face')
        pnet, rnet, onet = detect_face.create_mtcnn(sess, str(facemodel_path))
        # Helperlists for age/gender detection
        facelist = []
        imagelist = []
        # Inference for all images
        for index, image in enumerate(images):
            updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector' + '\nFile ' + str(index) + ' of ' + str(len(images)))
            try:
                bounding_boxes, _ = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
                nrof_faces = bounding_boxes.shape[0]
                # If a face was detected, go on
                if nrof_faces > 0:
                    detectedFaces = bounding_boxes[:, 0:4]
                    detectedFacesArray = []
                    img_size = np.asarray(image.shape)[0:2]
                    if nrof_faces > 1:
                        for single_face in range(nrof_faces):
                            detectedFacesArray.append(np.squeeze(detectedFaces[single_face]))
                    else:
                        detectedFacesArray.append(np.squeeze(detectedFaces))
                    # Crop the detected face and add it to the list to conduct age/gender identification
                    for x, detectedFaces in enumerate(detectedFacesArray):
                        detectedFaces = np.squeeze(detectedFaces)
                        bb = np.zeros(4, dtype=np.int32)
                        # Clamp the bounding box to the image borders
                        bb[0] = np.maximum(detectedFaces[0], 0)
                        bb[1] = np.maximum(detectedFaces[1], 0)
                        bb[2] = np.minimum(detectedFaces[2], img_size[1])
                        bb[3] = np.minimum(detectedFaces[3], img_size[0])
                        cropped_Face = image[bb[1]:bb[3], bb[0]:bb[2], :]
                        facelist.append(cropped_Face)
                        imagelist.append(index)
                # Write the results of the face detection into the resultsfile
                if not len(bounding_boxes) == 0:
                    hashvalue = hashvalues[index]
                    number_of_faces = len(bounding_boxes)
                    if REPORT_FORMAT[0] == 'Nuix':
                        line = "Face,md5:" + hashvalue
                    else:
                        line = str(Path(image_paths[index]).name) + "," + str(hashvalue) + ",FACES," + str(
                            number_of_faces) + "Faces"
                    detectionresults.write(line + "\n")
            except tf.errors.InvalidArgumentError:
                errorcount += 1
                logfile.write("Unable to detect faces in file with hash: \t" + str(hashvalue) + "\n")
    # Conduct age/gender recognition based on the list of detected & cropped faces
    if len(facelist) != 0:
        age_gender_detection(imagelist, facelist, hashvalues, image_paths)
    logfile.write("*" + str(datetime.now()) + ": \tFinished detection with face/age/gender detection model*\n")
    detectionresults.flush()
    detectionresults.close()
######
# Detection with the OPEN VINO Framework
# Evaluate Age & Gender based on input faces
######
def age_gender_detection(imagelist, facelist, hashvalues, image_paths):
    """Run OpenVINO age/gender inference on cropped faces and log results.

    imagelist maps each face in facelist back to the index of its source
    image, so the matching hash and path are written to the results file.
    Faces are letterboxed to the model's 62x62 input before inference.
    """
    # Acquire the age-gender detection model
    model_path = Path('Models/OpenVINO/age-gender')
    model_xml = str(model_path / 'model.xml')
    model_bin = str(model_path / 'model.bin')
    # Reopen the results file
    detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
    detectionresults = open(str(detectionresults_path), 'a')
    # Plugin initialization for specified device and load extensions library if specified
    ie = IECore()
    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)
    input_blob = next(iter(net.inputs))
    net.batch_size = len(facelist)
    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    # Loading model to the plugin
    exec_net = ie.load_network(network=net, device_name='CPU')
    # Resize and reshape input faces
    for i in range(n):
        image = facelist[i]
        if image.shape[:-1] != (62, 62):
            h, w = image.shape[:2]
            # interpolation method
            if h > 62 or w > 62:  # shrinking image
                interp = cv2.INTER_AREA
            else:  # stretching image
                interp = cv2.INTER_CUBIC
            # aspect ratio of image
            aspect = w / h
            # compute scaling and pad sizing
            if aspect > 1:  # horizontal image
                new_w = 62
                new_h = np.round(new_w / aspect).astype(int)
                pad_vert = (62 - new_h) / 2
                pad_top, pad_bot = np.floor(pad_vert).astype(int), np.ceil(pad_vert).astype(int)
                pad_left, pad_right = 0, 0
            elif aspect < 1:  # vertical image
                new_h = 62
                new_w = np.round(new_h * aspect).astype(int)
                pad_horz = (62 - new_w) / 2
                pad_left, pad_right = np.floor(pad_horz).astype(int), np.ceil(pad_horz).astype(int)
                pad_top, pad_bot = 0, 0
            else:  # square image
                new_h, new_w = 62, 62
                pad_left, pad_right, pad_top, pad_bot = 0, 0, 0, 0
            # set pad color
            padColor = 0
            # BUGFIX: was "len(image.shape) is 3" - identity comparison with
            # an int literal is implementation-dependent; use equality.
            if len(image.shape) == 3 and not isinstance(padColor, (
                    list, tuple, np.ndarray)):  # color image but only one color provided
                padColor = [padColor] * 3
            # scale and pad
            scaled_img = cv2.resize(image, (new_w, new_h), interpolation=interp)
            scaled_img = cv2.cvtColor(scaled_img, cv2.COLOR_BGR2RGB)
            scaled_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bot, pad_left, pad_right,
                                            borderType=cv2.BORDER_CONSTANT, value=padColor)
            image = scaled_img.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    # Conduct inference
    res = exec_net.infer(inputs={input_blob: images})
    # Process inference results
    for y in range(len(facelist)):
        probable_age = int(np.squeeze(res['age_conv3'][y]) * 100)
        if np.squeeze(res['prob'][y][0]) > 0.5:
            gender = "Female"
        else:
            gender = "Male"
        age_gender_combo = str(probable_age) + str(gender)
        # Write inference results to resultsfile
        hashvalue = hashvalues[imagelist[y]]
        if REPORT_FORMAT[0] == 'Nuix':
            line = str(age_gender_combo) + ",md5:" + hashvalue
        else:
            line = str(Path(image_paths[imagelist[y]]).name) + "," + str(hashvalue) + ",AGE-GENDER," + str(
                age_gender_combo)
        detectionresults.write(line + "\n")
######
# Detection with the OPEN VINO Framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_openvino(image_paths, images, hashvalue):
    """Classify images with the OpenVINO VGG19 model, write top-3 hits >= 0.3.

    Note: despite the singular name, *hashvalue* is the list of per-image
    MD5 digests (name kept for interface compatibility); entry i belongs
    to images[i]/image_paths[i].
    """
    # Update progress meter and reopen results file
    updateProgressMeter(6, 'Detecting with OpenVINO Object Detector')
    logfile.write("*" + str(datetime.now()) + ": \tStarting detection with OpenVINO object detection model*\n")
    detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
    detectionresults = open(str(detectionresults_path), 'a')
    # Fetch paths for openvino model
    model_path = Path('Models/OpenVINO/vgg19')
    model_xml = str(model_path / 'model.xml')
    model_bin = str(model_path / 'model.bin')
    model_labels = str(model_path / 'model.labels')
    temp_bilder = images
    # Plugin initialization for specified device and load extensions library if specified
    ie = IECore()
    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = 4000
    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    # Loading model to the plugin
    exec_net = ie.load_network(network=net, device_name='CPU')
    # Create batches to prevent RAM overload
    batches = tuple(temp_bilder[x:x + net.batch_size] for x in range(0, len(temp_bilder), net.batch_size))
    # Start sync inference
    for batch in batches:
        for index, temp_pic in enumerate(batch):
            temp_pic = cv2.resize(temp_pic, (w, h))
            temp_pic = temp_pic.transpose((2, 0, 1))
            images[index] = temp_pic
        res = exec_net.infer(inputs={input_blob: images})
        # Processing output blob
        res = res[out_blob]
        # Prepare label file
        with open(model_labels, 'r') as f:
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
        # Clean inference results and write them to resultsfile
        for i, probs in enumerate(res):
            probs = np.squeeze(probs)
            top_ind = np.argsort(probs)[-3:][::-1]
            for id in top_ind:
                if probs[id] >= 0.3:
                    det_label = labels_map[id].split(sep=' ', maxsplit=1)[1]
                    if REPORT_FORMAT[0] == 'Nuix':
                        # BUGFIX: was '"md5:" + hashvalue', concatenating the
                        # whole hash list (TypeError); use this image's hash.
                        line = ",".join([det_label, "md5:" + hashvalue[i]])
                    else:
                        line = ",".join([Path(image_paths[i]).name, hashvalue[i], str(probs[id]), str(det_label)])
                    detectionresults.write(line + "\n")
    logfile.write("*" + str(datetime.now()) + ": \tFinished detection with OpenVINO object detection model*\n")
######
# Worker function to load and encode known faces and to compare them against
# the provided input material
######
def faceRecognition(known_faces_path, image_paths, images, hashvalues):
    """Compare faces found in *images* against known reference faces.

    Encodes every image in known_faces_path (file stem = person name),
    matches detected faces against them, logs matches to the results file
    and optionally exports annotated face crops as JPG (output_detFaces).
    Returns the number of known-face matches.
    """
    # Update progress bar
    updateProgressMeter(5, 'Conducting Face Recognition')
    known_face_counter = 0
    # Open the results file
    detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
    detectionresults = open(str(detectionresults_path), 'a')
    OutputPictureFolder = PATH_TO_RESULTS / 'DetectedFaces'
    if not OutputPictureFolder.exists():
        os.mkdir(str(OutputPictureFolder))
    # Initiate array to store known faces
    known_face_encodings = []
    known_face_names = []
    known_faces = Path.iterdir(Path(known_faces_path))
    # Create encodings and store them with names
    for known_face in known_faces:
        known_person_image = face_recognition.load_image_file(known_face)
        known_face_encodings.extend(face_recognition.face_encodings(known_person_image))
        known_face_names.append(Path(known_face).stem)
    logfile.write("*" + str(datetime.now()) + ": \tStarting face recognition with " + str(len(known_face_names)) + " known faces*\n")
    # Load images, detect faces, encode and compare them to the known faces
    for index, image_to_detect in enumerate(images):
        hashvalue = hashvalues[index]
        image_path = image_paths[index]
        updateProgressMeter(5, 'Face Reco Image ' + str(index) + ' of ' + str(len(images)))
        # Use GPU based model to detect & encode
        face_locations = face_recognition.face_locations(image_to_detect, model="cnn")
        face_encodings = face_recognition.face_encodings(image_to_detect, face_locations)
        # Loop through each face in this frame of video
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=facereq_tolerance)
            name = "Unknown"
            # Check the face distance and get best match
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            # BUGFIX: np.argmin raises ValueError on an empty sequence when no
            # known faces could be encoded - skip matching in that case.
            if len(face_distances) == 0:
                continue
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            # If there is a match, write it to the output file
            if name != "Unknown":
                known_face_counter += 1
                if REPORT_FORMAT[0] == 'Nuix':
                    line = ",".join([name, "md5:" + hashvalue])
                else:
                    line = ",".join([Path(image_path).name, hashvalue, "FACE-Match", name])
                detectionresults.write(line + "\n")
                if output_detFaces:
                    # Export detected face with bounding box
                    cv2.rectangle(image_to_detect, (left, top), (right, bottom), (0, 0, 255), 2)
                    # Draw a label with a name below the face
                    cv2.rectangle(image_to_detect, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(image_to_detect, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
                    savePath = str(OutputPictureFolder / str(Path(image_path).name)) + '.jpg'
                    detectedFace = Image.fromarray(image_to_detect)
                    detectedFace.save(savePath)
    logfile.write("*" + str(datetime.now()) + ": \tFace Recognition completed.*\n")
    detectionresults.flush()
    detectionresults.close()
    # Return amount of detected known faces
    return known_face_counter
######
# Worker function to conduct speech detection in audio files
# for all audio files detected
######
def audioSpeechDetection(audiolist):
    """Run speech detection over all audio files in parallel and log results.

    Each worker returns (speechPercentage, audiopath); non-float percentages
    mark unsupported files. Returns the number of processed audio files.
    """
    logfile.write("*" + str(datetime.now()) + ": \tStarting audio speech detection*\n")
    updateProgressMeter(11, 'Processing Audio Files')
    audiocounter = 0
    # Open the results file
    detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
    detectionresults = open(str(detectionresults_path), 'a')
    pool = Pool(maxtasksperchild=100)
    result = pool.map(audioAnalysis.segmentSpeechDetection, audiolist, chunksize=10)
    pool.close()
    # Synchronize after completion
    pool.join()
    pool.terminate()
    # idiom fix: was "x != None"
    result = [x for x in result if x is not None]
    for processedAudio in result:
        speechPercentage, audiopath = processedAudio
        # Check for the unsupported-file flag
        if not isinstance(speechPercentage, float):
            logfile.write("Unsupported audio file: " + str(audiopath) + "\n")
        else:
            # Hash the audio file in byte-chunks of 4096
            hash_md5 = hashlib.md5()
            with open(audiopath, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5.update(chunk)
            hashvalue = hash_md5.hexdigest()
            audiocounter += 1
            # BUGFIX: the result line is now only written when one was built;
            # previously, Nuix format with 0.0 speech wrote a stale or
            # undefined "line" from the preceding iteration.
            if REPORT_FORMAT[0] == 'Nuix':
                if speechPercentage != 0.0:
                    line = ",".join(["AUDIO-SPEECH", "md5:" + hashvalue])
                    detectionresults.write(line + "\n")
            else:
                line = ",".join([Path(audiopath).name, hashvalue, str(speechPercentage), "AUDIO-SPEECH"])
                detectionresults.write(line + "\n")
    logfile.write("*" + str(datetime.now()) + ": \tAudio speech detection completed.*\n")
    detectionresults.flush()
    detectionresults.close()
    return audiocounter
######
# Split the report file to allow seamless integration into XWays Hash Database per category
######
def createXWaysReport():
    """Split the results CSV into one hash-list txt per category for X-Ways.

    Each category becomes a sanitized "<category>.txt" under XWaysOutput
    containing the hashes (column 1), finally prefixed with an "md5" header
    line for import as an X-Ways hash database.
    """
    detectionresults_path = str(PATH_TO_RESULTS / 'Detection_Results.csv')
    xways_folder = PATH_TO_RESULTS / 'XWaysOutput'
    if not xways_folder.exists():
        os.mkdir(str(xways_folder))
    # NOTE(review): itertools.groupby only groups *consecutive* rows - this
    # assumes the results file is ordered by category; confirm upstream.
    # (Handle leak fixed: the reader's file object is now closed via with.)
    with open(detectionresults_path, newline='') as resultsfile:
        for key, rows in groupby(csv.reader(resultsfile), lambda row: row[3]):
            # Skip the CSV header group; replace special characters in categories
            if str(key) != 'category':
                key = str(key).replace("/", "-")
                key = str(key).replace(".", "")
                key = str(key).replace("(", "")
                key = str(key).replace(")", "")
                key = key + '.txt'
                detectionresults_single_path = xways_folder / key
                with open(str(detectionresults_single_path), 'a') as rf:
                    for row in rows:
                        rf.write(row[1] + "\n")
                    rf.flush()
    # Get a list of all files in results directory
    resultsfiles = os.listdir(str(xways_folder))
    # Prepend them with MD5 for seamless import into XWays
    for file in resultsfiles:
        line = "md5"
        if file[-3:] == 'txt' and file != 'Logfile.txt':
            with open(str(xways_folder / file), 'r+') as ff:
                content = ff.read()
                ff.seek(0, 0)
                ff.write(line.rstrip('\r\n') + '\n' + content)
######
#
# Main program function
# First initiates required parameters and variables, then loads the GUI
# After which the image and video load functions are triggered based on the input parameters
# Finally, the detection is executed and results written to the place requested
#
######
# Prevent execution when externally called
if __name__ == '__main__':
######
# Collecting parameters via GUI
######
sg.ChangeLookAndFeel('Dark')
layout = [[sg.Text('General Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Please specify the folder holding the media data:')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestBilder', button_color=('black', 'grey'))], #Path.home() = Initial folder
[sg.Text('Where shall I place the results?')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestResults', button_color=('black', 'grey'))], #Path.home()
[sg.Text('TENSORFLOW DETECTORS')],
[sg.Checkbox('Objects/Persons', size=(15, 2)),
sg.Checkbox('Actions'),
sg.Checkbox('IS Logos'),
sg.Checkbox("Face Recognition")],
[sg.Text('OPEN VINO DETECTORS')],
[sg.Checkbox('Objects-fast', size=(15, 2)),
sg.Checkbox('Faces/Age/Gender')],
[sg.Text('Output Format:'), sg.Listbox(values=('Nuix', 'XWays', 'csv'), size=(29, 3))],
[sg.Text('Video Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('# of frames to be analyzed per Minute:', size=(36, 0))],
[sg.Slider(range=(1, 120), orientation='h', size=(29, 20), default_value=30)],
[sg.Text('Max. # of frames to be analyzed per Video:', size=(36, 0))],
[sg.Slider(range=(1, 500), orientation='h', size=(29, 20), default_value=100)],
[sg.Text('Check for & discard similar frames?'),
sg.InputCombo(('Yes', 'No'), default_value='No', size=(10, 2))],
[sg.Text('Face Recognition', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Specify folder with known faces (if FaceReq selected): ')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/known', button_color=('black', 'grey'))],
[sg.Text('Specify face recognition tolerance (Default: 60%):', size=(48, 0))],
[sg.Slider(range=(0, 100), orientation='h', size=(29, 20), default_value=60)],
[sg.Checkbox('Output detected faces as jpg', size=(25, 2))],
[sg.Text('Audio Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('AUDIO PROCESSING')],
[sg.Checkbox('Speech Detection', size=(15, 2))],
[sg.OK(button_color=('black', 'sea green')), sg.Cancel(button_color=('black', 'grey'))]]
layout_progress = [[sg.Text('Detection in progress')],
[sg.ProgressBar(12, orientation='h', size=(20, 20), key='progressbar')],
[sg.Cancel()]]
# Render the GUI
gui_input = sg.Window('BKP Media Detector').Layout(layout).Read()
error = False
# Validate input
validateInput(gui_input)
# Initiating progress meter
updateProgressMeter(1, 'Initializing variables & parameters...')
startTime = datetime.now()
# Variable to determine minimum GPU Processor requirement & to disable TF log output
# os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '5'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Validating TF version
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# Defining multiple needed variables based on GUI input & adding TF/OpenVINO directory to path
PATH_TO_INPUT = Path(gui_input[1][0])
TEST_IMAGE_PATHS = Path.iterdir(PATH_TO_INPUT)
number_of_input = 0
for elements in Path.iterdir(PATH_TO_INPUT):
number_of_input += 1
PATH_TO_RESULTS = Path(gui_input[1][1])
PATH_TO_OBJECT_DETECTION_DIR = '/home/b/Programs/tensorflow/models/research' # PLACEHOLDER-tobereplacedWithPathtoDirectory
sys.path.append(PATH_TO_OBJECT_DETECTION_DIR)
REPORT_FORMAT = gui_input[1][8]
frames_per_second = gui_input[1][9] / 60
max_frames_per_video = gui_input[1][10]
video_sensitivity_text = gui_input[1][11]
KNOWN_FACES_PATH = gui_input[1][12]
facereq_tolerance = int(gui_input[1][13])/100
output_detFaces = gui_input[1][14]
if video_sensitivity_text == "Yes":
video_sensitivity = 20
else:
video_sensitivity = 0
# Check which models to apply and load their corresponding label maps.
# Each enabled model contributes a frozen graph (.pb) and a category index
# built from the .pbtxt label map that sits next to the graph file.
from object_detection.utils import label_map_util
graphlist = []
indexlist = []
MODEL1 = bool(gui_input[1][2])
if MODEL1:
    # Open Images object-detection model
    OPEN_IMAGES_GRAPH = str(Path('Models/OpenImages/openimages.pb'))
    # Label map shares the graph's basename: strip '.pb', append '.pbtxt'.
    OPEN_IMAGES_LABELS = str(OPEN_IMAGES_GRAPH)[:-3] + '.pbtxt'
    OPEN_IMAGES_INDEX = label_map_util.create_category_index_from_labelmap(OPEN_IMAGES_LABELS)
    graphlist.append(OPEN_IMAGES_GRAPH)
    indexlist.append(OPEN_IMAGES_INDEX)
MODEL2 = bool(gui_input[1][3])
if MODEL2:
    # AVA (actions) detection model
    AVA_GRAPH = str(Path('Models/AVA/ava.pb'))
    AVA_LABELS = str(AVA_GRAPH)[:-3] + '.pbtxt'
    AVA_INDEX = label_map_util.create_category_index_from_labelmap(AVA_LABELS)
    graphlist.append(AVA_GRAPH)
    indexlist.append(AVA_INDEX)
MODEL3 = bool(gui_input[1][4])
if MODEL3:
    # IS logo detection model
    SPECIAL_DETECTOR_GRAPH = str(Path('Models/ISLogos/islogos.pb'))
    SPECIAL_DETECTOR_LABELS = str(SPECIAL_DETECTOR_GRAPH)[:-3] + '.pbtxt'
    SPECIAL_DETECTOR_INDEX = label_map_util.create_category_index_from_labelmap(SPECIAL_DETECTOR_LABELS)
    graphlist.append(SPECIAL_DETECTOR_GRAPH)
    indexlist.append(SPECIAL_DETECTOR_INDEX)
# Remaining checkboxes toggle the non-TF-graph detectors.
FACE_RECOGNITION = bool(gui_input[1][5])
OPEN_VINO_vgg19 = bool(gui_input[1][6])
FACE_MODEL = bool(gui_input[1][7])
AUDIO_SPEECH_DETECTION = bool(gui_input[1][15])
# Update the progress indicator
updateProgressMeter(2, 'Process started. Loading ' + str(number_of_input) + ' media files...')
# Create logfile (handle stays open; it is flushed/closed at the end of the run).
logfile = open(str(PATH_TO_RESULTS / 'Logfile.txt'), 'w')
logfile.write('***DETECTION LOG***\n')
logfile.write("*" + str(datetime.now()) + ': \tProcess started. Loading images...*\n')
# Create the results file and write only the CSV header for the chosen
# report format; detectors append rows to it later by reopening the file.
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'w')
if REPORT_FORMAT[0] == 'Nuix':
    detectionresults.write("tag,searchterm\n")
else:
    detectionresults.write("name,hash,score,category\n")
detectionresults.flush()
detectionresults.close()
# Initiate needed variables
vidlist = []
audiolist = []
final_images = []
errors = []
# Multiprocess the image load function on all CPU cores available
pool = Pool(maxtasksperchild=100)
processed_images = pool.map(load_image_into_numpy_array, TEST_IMAGE_PATHS, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
# Drop entries where conversion failed (loader returned None).
# BUGFIX: PEP 8 (E711) requires identity comparison against None; '!= None'
# relies on __ne__ and can misbehave for objects overriding comparison.
processed_images = [x for x in processed_images if x is not None]
# Route each loaded element according to the mimetype flag set by the loader.
for processed_image in processed_images:
    if str(processed_image[1]) == "VIDEO":
        # If present, populate the video list
        vidlist.append(processed_image[0])
    elif str(processed_image[1]) == "AUDIO":
        audiolist.append(processed_image[0])
    elif str(processed_image[1]) == "OCTET":
        # Generic octet-stream: guess video vs. audio from the extension.
        if processed_image[0][-3:] in ["mp4", "mov", "mpg", "avi", "exo", "mkv", "m4v", "ebm"]:
            vidlist.append(processed_image[0])
        else:
            audiolist.append(processed_image[0])
    elif str(processed_image[1]) == "ERROR":
        errors.append(processed_image[0])
    else:
        # Regular image: keep it for the detection stage.
        final_images.append(processed_image)
# Persist load errors to the logfile as they occurred.
for error in errors:
    logfile.write(error)
logfile.flush()
# Count the number of images before adding the videoframes
number_of_images = len(final_images)
# Update the progress indicator
updateProgressMeter(3, 'Loading ' + str(len(vidlist)) + ' Videos...')
# Multiprocess the video load function on all CPU cores available
# (smaller chunk size / task limit than images: video decoding is heavier).
pool = Pool(maxtasksperchild=10)
videoframes = pool.map(load_video_into_numpy_array, vidlist, chunksize=2)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
number_of_videos = 0
# Loader returns a str (error message) on failure or a list of frames on success.
for video in videoframes:
    if type(video) is str:
        errors.append(video)
    if type(video) is list:
        final_images.extend(video)
        number_of_videos += 1
for error in errors:
    logfile.write(error)
logfile.flush()
# Split the result from the loading function into hashes and image arrays
if len(final_images) != 0:
    image_path, hashvalues, image_nps = zip(*final_images)
# Update the progress indicator & logfile
updateProgressMeter(4, 'Starting detection of ' + str(len(final_images)) + ' media files')
logfile.write("*" + str(datetime.now()) + ": \tLoading completed. Detecting...*\n")
# Conduct Face Recognition if needed
if FACE_RECOGNITION:
    known_face_counter = faceRecognition(KNOWN_FACES_PATH, image_path, image_nps, hashvalues)
# Conduct OpenVino VGG19 Model if needed
if OPEN_VINO_vgg19:
    run_inference_openvino(image_path, image_nps, hashvalues)
# Execute all other detection models
if len(final_images) != 0:
    run_inference_for_multiple_images(image_path, image_nps, hashvalues)
# Conduct face/age/gender detection
if FACE_MODEL:
    faceDetection(image_path, image_nps, hashvalues)
# Audio processing works on the raw file list, not on decoded frames.
if AUDIO_SPEECH_DETECTION:
    audiofiles_processed = audioSpeechDetection(audiolist)
else:
    audiofiles_processed = 0
# Check whether an Xways report needs to be created
if REPORT_FORMAT[0] == 'XWays':
    createXWaysReport()
# Write process statistics to logfile.
# BUGFIX: the closing "*\n" marker was previously inside the path component
# ('Detection_Results.csv*\n'), which corrupted the logged results path.
logfile.write("*Results:\t\t\t" + str(PATH_TO_RESULTS / 'Detection_Results.csv') + "*\n")
total_processed = number_of_images + number_of_videos + audiofiles_processed
logfile.write("*Total Amount of Files:\t\t" + str(number_of_input) + " (of which " + str(total_processed) + " were processed.)*\n")
logfile.write("*Processed Images:\t\t" + str(number_of_images) + "*\n")
logfile.write("*Processed Videos: \t\t" + str(number_of_videos) + " (analyzed " + str(frames_per_second * 60) + " frames per minute, up to max. 500) with the check for content-based duplicates set to " + video_sensitivity_text + "\n")
logfile.write("*Processed Audio Files:\t\t" + str(audiofiles_processed) + "*\n")
logfile.write("*Applied models:\n")
for applied_graph in graphlist:
    logfile.write("\t\t\t\t" + applied_graph + "\n")
if OPEN_VINO_vgg19: logfile.write("\t\t\t\tOpenVINO Object Detector\n")
if FACE_MODEL: logfile.write("\t\t\t\tFace-Age-Gender Detector\n")
if FACE_RECOGNITION: logfile.write("\t\t\t\tFace Recognition (Known faces detected: " + str(known_face_counter) + ")\n")
logfile.write("*Processing time:\t\t" + str(datetime.now() - startTime) + "*\n")
# Guard against ZeroDivisionError when no file could be processed at all.
if total_processed:
    logfile.write("*Time per processed file:\t" + str((datetime.now() - startTime) / total_processed) + "*\n")
logfile.flush()
logfile.close()
# Update progress indicator: final tick (12/12) closes the meter window.
sg.OneLineProgressMeter('BKP Media Detector', 12, 12, 'key', 'Detection finished',orientation='h',size=(100, 10))
# Deliver final success pop up to user
sg.Popup('The detection was successful',
         'The results are placed here:',
         'Path: "{}"'.format(str(PATH_TO_RESULTS)))
| 40.676301
| 238
| 0.613093
| 5,107
| 42,222
| 4.914431
| 0.158606
| 0.012551
| 0.007172
| 0.010997
| 0.330823
| 0.272372
| 0.240617
| 0.199139
| 0.150928
| 0.118496
| 0
| 0.017518
| 0.27398
| 42,222
| 1,037
| 239
| 40.715526
| 0.801233
| 0.157525
| 0
| 0.231951
| 0
| 0
| 0.115759
| 0.013083
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018433
| false
| 0
| 0.039939
| 0
| 0.073733
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911e4f54a8e9fbbfd53aa376d04e2f253bbddbd8
| 2,252
|
py
|
Python
|
src/BruteForce.py
|
stevenwalton/Retro-Learner
|
74586c57b5dd5f6e82abaff99344285731f1fc56
|
[
"MIT"
] | null | null | null |
src/BruteForce.py
|
stevenwalton/Retro-Learner
|
74586c57b5dd5f6e82abaff99344285731f1fc56
|
[
"MIT"
] | null | null | null |
src/BruteForce.py
|
stevenwalton/Retro-Learner
|
74586c57b5dd5f6e82abaff99344285731f1fc56
|
[
"MIT"
] | null | null | null |
import time
import retro
import FrameSkip
import TimeLimit
import Brute
class BruteForce():
    """Run retro's Brute search algorithm on a game until a timestep limit.

    Wraps a `retro` environment with frame skipping and an episode time
    limit, then repeatedly runs Brute, tracking (and optionally recording)
    the best-scoring action sequence.

    Note: the ``time`` parameter is a boolean flag ("print elapsed time"),
    kept for backward compatibility even though it shadows the ``time``
    module inside ``__init__`` (the module is not needed there).
    """
    def __init__(self,
                 game='Airstriker-Genesis',
                 max_episode_steps=4500,
                 timestep_limit=100_000_000,
                 state=retro.State.DEFAULT,
                 scenario=None,
                 save=False,
                 savename="best.bk2",
                 fs_skip=4,
                 render=False,
                 time=False,
                 ):
        self.game = game
        self.max_episode_steps = max_episode_steps
        self.timestep_limit = timestep_limit
        self.state = state
        self.scenario = scenario
        self.save = save
        self.savename = savename
        self.fs_skip = fs_skip
        self.render = render
        self.time = time
        # Ensure the recording filename carries the .bk2 extension.
        if not self.savename.endswith(".bk2"):
            self.savename += ".bk2"
        self.timesteps = 0
        self.best_reward = float('-inf')
        self.env = retro.make(game=game,
                              state=state,
                              use_restricted_actions=retro.Actions.DISCRETE,
                              scenario=scenario)
        self.env = FrameSkip.Frameskip(self.env, skip=self.fs_skip)
        self.env = TimeLimit.TimeLimit(self.env, max_episode_steps=self.max_episode_steps)

    def start(self):
        """Run Brute until the cumulative timestep limit is exceeded.

        Prints every new best reward (and elapsed time if enabled); when
        ``save`` is set, replays the best action sequence while recording
        a .bk2 movie to ``savename``.
        """
        brute = Brute.Brute(self.env, max_episode_steps=self.max_episode_steps, render=self.render)
        if self.time:
            startTime = time.time()
        while True:
            acts, reward = brute.run()
            self.timesteps += len(acts)
            if reward > self.best_reward:
                print(f"New best reward {reward} from {self.best_reward}")
                if self.time:
                    print(f"Elapsed time {time.time() - startTime}")
                self.best_reward = reward
                if self.save:
                    # Re-run the best action sequence with recording enabled.
                    self.env.unwrapped.record_movie(self.savename)
                    self.env.reset()
                    for act in acts:
                        self.env.step(act)
                    self.env.unwrapped.stop_record()
            if self.timesteps > self.timestep_limit:
                print("Timed out")
                break
| 34.121212
| 98
| 0.525311
| 239
| 2,252
| 4.799163
| 0.301255
| 0.061029
| 0.091543
| 0.049695
| 0.071491
| 0.071491
| 0.071491
| 0.071491
| 0.071491
| 0
| 0
| 0.013738
| 0.385879
| 2,252
| 65
| 99
| 34.646154
| 0.815618
| 0
| 0
| 0.033898
| 0
| 0
| 0.059059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.084746
| 0
| 0.135593
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
911fe80423c3725cffb5c649027000c3b8755a5f
| 5,429
|
py
|
Python
|
tutorials/04-advanced/03-super-resolution-onnx/main.py
|
yakhyo/PyTorch-Tutorials
|
163287bc735b09c366dbdfa3989e81acaef6fa1f
|
[
"MIT"
] | 7
|
2021-05-16T14:36:20.000Z
|
2021-12-30T07:07:31.000Z
|
tutorials/04-advanced/03-super-resolution-onnx/main.py
|
yakhyo/PyTorch-Tutorials
|
163287bc735b09c366dbdfa3989e81acaef6fa1f
|
[
"MIT"
] | null | null | null |
tutorials/04-advanced/03-super-resolution-onnx/main.py
|
yakhyo/PyTorch-Tutorials
|
163287bc735b09c366dbdfa3989e81acaef6fa1f
|
[
"MIT"
] | 3
|
2021-05-17T12:11:11.000Z
|
2021-11-25T10:06:14.000Z
|
import io
import numpy as np
import torch.utils.model_zoo as model_zoo
import torch.onnx
import torch.nn as nn
import torch.nn.init as init
# ================================================================ #
# Building the Model #
# ================================================================ #
class SuperResolutionNet(nn.Module):
    """Sub-pixel convolutional super-resolution network.

    Three ReLU-activated conv layers feed a final conv whose
    ``upscale_factor ** 2`` output channels are rearranged by PixelShuffle
    into a single-channel image upscaled by ``upscale_factor``.
    """

    def __init__(self, upscale_factor, inplace=False):
        super(SuperResolutionNet, self).__init__()
        self.relu = nn.ReLU(inplace=inplace)
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(in_channels=32, out_channels=upscale_factor ** 2, kernel_size=3, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
        self._initialize_weights()

    def forward(self, x):
        # Feature extraction through the three activated conv layers.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.relu(conv(x))
        # Final conv + pixel shuffle performs the spatial upscaling.
        return self.pixel_shuffle(self.conv4(x))

    def _initialize_weights(self):
        # Orthogonal init; ReLU gain for the activated layers only.
        relu_gain = init.calculate_gain('relu')
        for conv in (self.conv1, self.conv2, self.conv3):
            init.orthogonal_(conv.weight, relu_gain)
        init.orthogonal_(self.conv4.weight)
# Creating an instance from SuperResolutionNet
net = SuperResolutionNet(upscale_factor=3)
# ================================================================ #
#                  Downloading Pretrained Weights                  #
# ================================================================ #
model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# Initialize model with the pretrained weights
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.load_state_dict(model_zoo.load_url(model_url, map_location=device))
net.eval()  # eval mode so the export traces inference behavior
# onnx input shape: x.shape : (batch_size=1, channel=1, H, W)
# The model expects the Y component of the YCbCr of an image as an input so it has one channel
x = torch.randn(1, 1, 224, 224, requires_grad=True)
# BUGFIX: the original assigned the forward-pass *output tensor* to
# `onnx_model` and passed that to torch.onnx.export, which expects the
# nn.Module itself. Keep the output as `torch_out` — it is the PyTorch
# reference result compared against ONNX Runtime further down.
torch_out = net(x)
# Export the model to ONNX
torch.onnx.export(net,                      # model being run
                  x,                        # model input (or a tuple for multiple inputs)
                  "super_resolution.onnx",  # where to save the model
                  export_params=True,       # store the trained parameter weights inside the model file
                  opset_version=10,         # the ONNX version to export the model to
                  do_constant_folding=True, # whether to execute constant folding for optimization
                  input_names=['input'],    # the model's input names
                  output_names=['output'],  # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})
# ================================================================ #
#                        Loading ONNX model                        #
# ================================================================ #
import onnx
import onnxruntime
# Load the exported graph and validate its structure and operator set.
onnx_model = onnx.load("super_resolution.onnx")
onnx.checker.check_model(onnx_model)
# Create an inference session over the same file for runtime execution.
ort_session = onnxruntime.InferenceSession("super_resolution.onnx")
def to_numpy(tensor):
    """Convert a torch tensor to a NumPy array.

    Detaches from the autograd graph when required, and moves the data to
    host memory before conversion.
    """
    detached = tensor.detach() if tensor.requires_grad else tensor
    return detached.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
# NOTE(review): `torch_out` is never assigned earlier in this script — it is
# presumably meant to hold the PyTorch forward-pass result net(x); confirm
# and define it before this comparison, otherwise this line raises NameError.
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
# ================================================================ #
#              Reading Original Image and Feed it to Model         #
# ================================================================ #
from PIL import Image
import torchvision.transforms as transforms
img = Image.open("../../../cat_224x224.jpg")
# Match the fixed 224x224 input size the model was exported with.
resize = transforms.Resize([224, 224])
img = resize(img)
# The model expects the Y component of the YCbCr of an image as an input
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
# Add the batch dimension in place: (1, H, W) -> (1, 1, H, W).
img_y.unsqueeze_(0)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
# Scale the float output back to 8-bit pixel values and wrap as a PIL image.
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
# get the output image follow post-processing step from PyTorch implementation:
# upsample Cb/Cr bicubically to the super-resolved Y size and merge back to RGB.
output = Image.merge(
    "YCbCr",
    [img_out_y, img_cb.resize(img_out_y.size, Image.BICUBIC), img_cr.resize(img_out_y.size, Image.BICUBIC), ]
).convert("RGB")
# Save the image, we will compare this with the output image from mobile device
output.save("../../../cat_superres_with_ort.jpg")
| 40.514925
| 120
| 0.592374
| 679
| 5,429
| 4.559647
| 0.318115
| 0.020672
| 0.013566
| 0.023256
| 0.209625
| 0.187985
| 0.165698
| 0.126938
| 0.062016
| 0.062016
| 0
| 0.022775
| 0.215509
| 5,429
| 133
| 121
| 40.819549
| 0.704156
| 0.315528
| 0
| 0.027027
| 0
| 0
| 0.095614
| 0.032961
| 0
| 0
| 0
| 0
| 0.013514
| 1
| 0.054054
| false
| 0
| 0.135135
| 0.013514
| 0.22973
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91209eac140dfeb3483e2df389892eaa71a76d66
| 8,963
|
py
|
Python
|
features/steps/section.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 3,031
|
2015-01-02T11:11:24.000Z
|
2022-03-30T00:57:17.000Z
|
features/steps/section.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 934
|
2015-01-06T20:53:56.000Z
|
2022-03-28T10:08:03.000Z
|
features/steps/section.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 901
|
2015-01-07T18:22:07.000Z
|
2022-03-31T18:38:51.000Z
|
# encoding: utf-8
"""
Step implementations for section-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.section import Section
from docx.shared import Inches
from helpers import test_docx
# given ====================================================
# Each step loads a fixture .docx and selects the section under test.
@given("a Section object as section")
def given_a_Section_object_as_section(context):
    # Last section of the properties fixture document.
    context.section = Document(test_docx("sct-section-props")).sections[-1]
@given("a Section object {with_or_without} a distinct first-page header as section")
def given_a_Section_object_with_or_without_first_page_header(context, with_or_without):
    # Fixture layout: section 1 has a distinct first-page header, section 0 does not.
    section_idx = {"with": 1, "without": 0}[with_or_without]
    context.section = Document(test_docx("sct-first-page-hdrftr")).sections[section_idx]
@given('a section collection containing 3 sections')
def given_a_section_collection_containing_3_sections(context):
    document = Document(test_docx('doc-access-sections'))
    context.sections = document.sections
@given('a section having known page dimension')
def given_a_section_having_known_page_dimension(context):
    document = Document(test_docx('sct-section-props'))
    context.section = document.sections[-1]
@given('a section having known page margins')
def given_a_section_having_known_page_margins(context):
    document = Document(test_docx('sct-section-props'))
    context.section = document.sections[0]
@given('a section having start type {start_type}')
def given_a_section_having_start_type(context, start_type):
    # Maps the scenario's start-type name to the fixture section that uses it.
    section_idx = {
        'CONTINUOUS': 0,
        'NEW_PAGE': 1,
        'ODD_PAGE': 2,
        'EVEN_PAGE': 3,
        'NEW_COLUMN': 4,
    }[start_type]
    document = Document(test_docx('sct-section-props'))
    context.section = document.sections[section_idx]
@given('a section known to have {orientation} orientation')
def given_a_section_having_known_orientation(context, orientation):
    # Fixture layout: section 0 is landscape, section 1 is portrait.
    section_idx = {
        'landscape': 0,
        'portrait': 1
    }[orientation]
    document = Document(test_docx('sct-section-props'))
    context.section = document.sections[section_idx]
# when =====================================================
# Each step mutates the section selected by a prior @given step.
@when("I assign {bool_val} to section.different_first_page_header_footer")
def when_I_assign_value_to_section_different_first_page_hdrftr(context, bool_val):
    # bool_val arrives as the literal text "True"/"False"; eval converts it.
    context.section.different_first_page_header_footer = eval(bool_val)
@when('I set the {margin_side} margin to {inches} inches')
def when_I_set_the_margin_side_length(context, margin_side, inches):
    # Translate the scenario's side name to the Section property name.
    prop_name = {
        'left': 'left_margin',
        'right': 'right_margin',
        'top': 'top_margin',
        'bottom': 'bottom_margin',
        'gutter': 'gutter',
        'header': 'header_distance',
        'footer': 'footer_distance',
    }[margin_side]
    new_value = Inches(float(inches))
    setattr(context.section, prop_name, new_value)
@when('I set the section orientation to {orientation}')
def when_I_set_the_section_orientation(context, orientation):
    # 'None' exercises resetting the orientation to its default.
    new_orientation = {
        'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
        'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
        'None': None,
    }[orientation]
    context.section.orientation = new_orientation
@when('I set the section page height to {y} inches')
def when_I_set_the_section_page_height_to_y_inches(context, y):
    context.section.page_height = Inches(float(y))
@when('I set the section page width to {x} inches')
def when_I_set_the_section_page_width_to_x_inches(context, x):
    context.section.page_width = Inches(float(x))
@when('I set the section start type to {start_type}')
def when_I_set_the_section_start_type_to_start_type(context, start_type):
    # 'None' exercises resetting the start type to its default.
    new_start_type = {
        'None': None,
        'CONTINUOUS': WD_SECTION.CONTINUOUS,
        'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
        'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
        'NEW_PAGE': WD_SECTION.NEW_PAGE,
        'ODD_PAGE': WD_SECTION.ODD_PAGE,
    }[start_type]
    context.section.start_type = new_start_type
# then =====================================================
# Assertion steps verifying section state set up by @given/@when steps.
@then('I can access a section by index')
def then_I_can_access_a_section_by_index(context):
    sections = context.sections
    for idx in range(3):
        section = sections[idx]
        assert isinstance(section, Section)
@then('I can iterate over the sections')
def then_I_can_iterate_over_the_sections(context):
    sections = context.sections
    actual_count = 0
    for section in sections:
        actual_count += 1
        assert isinstance(section, Section)
    assert actual_count == 3
@then('len(sections) is 3')
def then_len_sections_is_3(context):
    sections = context.sections
    assert len(sections) == 3, (
        'expected len(sections) of 3, got %s' % len(sections)
    )
@then("section.different_first_page_header_footer is {bool_val}")
def then_section_different_first_page_header_footer_is(context, bool_val):
    actual = context.section.different_first_page_header_footer
    # bool_val is the literal text "True"/"False" from the scenario.
    expected = eval(bool_val)
    assert actual == expected, (
        "section.different_first_page_header_footer is %s" % actual
    )
# The next six steps all check the concrete type of a header/footer accessor
# by comparing the type name, since _Header/_Footer are private to docx.
@then("section.even_page_footer is a _Footer object")
def then_section_even_page_footer_is_a_Footer_object(context):
    actual = type(context.section.even_page_footer).__name__
    expected = "_Footer"
    assert actual == expected, "section.even_page_footer is a %s object" % actual
@then("section.even_page_header is a _Header object")
def then_section_even_page_header_is_a_Header_object(context):
    actual = type(context.section.even_page_header).__name__
    expected = "_Header"
    assert actual == expected, "section.even_page_header is a %s object" % actual
@then("section.first_page_footer is a _Footer object")
def then_section_first_page_footer_is_a_Footer_object(context):
    actual = type(context.section.first_page_footer).__name__
    expected = "_Footer"
    assert actual == expected, "section.first_page_footer is a %s object" % actual
@then("section.first_page_header is a _Header object")
def then_section_first_page_header_is_a_Header_object(context):
    actual = type(context.section.first_page_header).__name__
    expected = "_Header"
    assert actual == expected, "section.first_page_header is a %s object" % actual
@then("section.footer is a _Footer object")
def then_section_footer_is_a_Footer_object(context):
    actual = type(context.section.footer).__name__
    expected = "_Footer"
    assert actual == expected, "section.footer is a %s object" % actual
@then("section.header is a _Header object")
def then_section_header_is_a_Header_object(context):
    actual = type(context.section.header).__name__
    expected = "_Header"
    assert actual == expected, "section.header is a %s object" % actual
@then("section.{propname}.is_linked_to_previous is True")
def then_section_hdrftr_prop_is_linked_to_previous_is_True(context, propname):
    # propname is any of the six header/footer accessor names.
    actual = getattr(context.section, propname).is_linked_to_previous
    expected = True
    assert actual == expected, (
        "section.%s.is_linked_to_previous is %s" % (propname, actual)
    )
@then('the reported {margin_side} margin is {inches} inches')
def then_the_reported_margin_is_inches(context, margin_side, inches):
    # Same side-name -> property-name mapping as the @when margin step.
    prop_name = {
        'left': 'left_margin',
        'right': 'right_margin',
        'top': 'top_margin',
        'bottom': 'bottom_margin',
        'gutter': 'gutter',
        'header': 'header_distance',
        'footer': 'footer_distance',
    }[margin_side]
    expected_value = Inches(float(inches))
    actual_value = getattr(context.section, prop_name)
    assert actual_value == expected_value
@then('the reported page orientation is {orientation}')
def then_the_reported_page_orientation_is_orientation(context, orientation):
    expected_value = {
        'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
        'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
    }[orientation]
    assert context.section.orientation == expected_value
@then('the reported page width is {x} inches')
def then_the_reported_page_width_is_width(context, x):
    assert context.section.page_width == Inches(float(x))
@then('the reported page height is {y} inches')
def then_the_reported_page_height_is_11_inches(context, y):
    assert context.section.page_height == Inches(float(y))
@then('the reported section start type is {start_type}')
def then_the_reported_section_start_type_is_type(context, start_type):
    expected_start_type = {
        'CONTINUOUS': WD_SECTION.CONTINUOUS,
        'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
        'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
        'NEW_PAGE': WD_SECTION.NEW_PAGE,
        'ODD_PAGE': WD_SECTION.ODD_PAGE,
    }[start_type]
    assert context.section.start_type == expected_start_type
| 34.340996
| 88
| 0.716278
| 1,197
| 8,963
| 5.015038
| 0.103592
| 0.058304
| 0.030318
| 0.018324
| 0.712477
| 0.666
| 0.53923
| 0.402299
| 0.334666
| 0.239547
| 0
| 0.003337
| 0.164119
| 8,963
| 260
| 89
| 34.473077
| 0.797918
| 0.027111
| 0
| 0.326203
| 0
| 0
| 0.255827
| 0.047652
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.15508
| false
| 0
| 0.037433
| 0
| 0.192513
| 0.005348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9120d4c7c58950a1c79165874f5716c1d3e76e4c
| 4,421
|
py
|
Python
|
scipy/sparse/csgraph/_laplacian.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | 1
|
2018-10-04T15:34:14.000Z
|
2018-10-04T15:34:14.000Z
|
scipy/sparse/csgraph/_laplacian.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/sparse/csgraph/_laplacian.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
    """ Return the Laplacian matrix of a directed graph.
    For non-symmetric graphs the out-degree is used in the computation.
    Parameters
    ----------
    csgraph : array_like or sparse matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute normalized Laplacian.
    return_diag : bool, optional
        If True, then return diagonal as well as laplacian.
    Returns
    -------
    lap : ndarray
        The N x N laplacian matrix of graph.
    diag : ndarray
        The length-N diagonal of the laplacian matrix.
        diag is returned only if return_diag is True.
    Notes
    -----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigen-decomposition
    of the laplacian matrix can give insight into many properties of the graph.
    For non-symmetric directed graphs, the laplacian is computed using the
    out-degree of each node.
    Examples
    --------
    >>> from scipy.sparse import csgraph
    >>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
    >>> G
    array([[ 0,  0,  0,  0,  0],
           [ 0,  1,  2,  3,  4],
           [ 0,  2,  4,  6,  8],
           [ 0,  3,  6,  9, 12],
           [ 0,  4,  8, 12, 16]])
    >>> csgraph.laplacian(G, normed=False)
    array([[  0,   0,   0,   0,   0],
           [  0,   9,  -2,  -3,  -4],
           [  0,  -2,  16,  -6,  -8],
           [  0,  -3,  -6,  21, -12],
           [  0,  -4,  -8, -12,  24]])
    """
    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')
    # Normalization divides by sqrt(degree), so integer graphs must be
    # promoted to floating point first.
    # BUGFIX: np.int / np.uint / np.float were deprecated aliases of the
    # Python builtins and are removed in NumPy >= 1.24; np.integer is the
    # abstract scalar type covering both signed and unsigned integers.
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        csgraph = csgraph.astype(np.float64)
    # Dispatch on storage format; both helpers implement the same contract.
    if isspmatrix(csgraph):
        return _laplacian_sparse(csgraph, normed=normed,
                                 return_diag=return_diag)
    else:
        return _laplacian_dense(csgraph, normed=normed,
                                return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
| 32.507353
| 86
| 0.570007
| 605
| 4,421
| 4.072727
| 0.276033
| 0.044643
| 0.00974
| 0.00974
| 0.261364
| 0.163961
| 0.133117
| 0.092532
| 0.06737
| 0.06737
| 0
| 0.025232
| 0.291789
| 4,421
| 135
| 87
| 32.748148
| 0.761737
| 0.406922
| 0
| 0.393443
| 0
| 0
| 0.018251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.032787
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
912495f93184573b9203df22fc8bb27548652827
| 14,605
|
py
|
Python
|
coltran/run.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
coltran/run.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
coltran/run.py
|
admariner/google-research
|
7cee4b22b925581d912e8d993625c180da2a5a4f
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ColTran: Training and Continuous Evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from ml_collections import config_flags
import tensorflow as tf
import tensorflow_datasets as tfds
from coltran import datasets
from coltran.models import colorizer
from coltran.models import upsampler
from coltran.utils import train_utils
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=missing-docstring
# pylint: disable=not-callable
# pylint: disable=g-long-lambda
# Command-line flags: run mode, logging locations, hardware and data options.
flags.DEFINE_enum('mode', 'train', [
    'train', 'eval_train', 'eval_valid', 'eval_test'], 'Operation mode.')
flags.DEFINE_string('logdir', '/tmp/svt', 'Main directory for logs.')
flags.DEFINE_string('master', 'local',
                    'BNS name of the TensorFlow master to use.')
flags.DEFINE_enum('accelerator_type', 'GPU', ['CPU', 'GPU', 'TPU'],
                  'Hardware type.')
flags.DEFINE_enum('dataset', 'imagenet', ['imagenet', 'custom'], 'Dataset')
flags.DEFINE_string('data_dir', None, 'Data directory for custom images.')
flags.DEFINE_string('tpu_worker_name', 'tpu_worker', 'Name of the TPU worker.')
flags.DEFINE_string(
    'pretrain_dir', None, 'Finetune from a pretrained checkpoint.')
flags.DEFINE_string('summaries_log_dir', 'summaries', 'Summaries parent.')
flags.DEFINE_integer('steps_per_summaries', 100, 'Steps per summaries.')
flags.DEFINE_integer('devices_per_worker', 1, 'Number of devices per worker.')
flags.DEFINE_integer('num_workers', 1, 'Number workers.')
# The full model/training configuration comes from an ml_collections file.
config_flags.DEFINE_config_file(
    'config',
    default='test_configs/colorizer.py',
    help_string='Training configuration file.')
FLAGS = flags.FLAGS
def restore_checkpoint(model, ema, strategy, latest_ckpt=None, optimizer=None):
  """Creates a checkpoint under the strategy and optionally restores it.

  Args:
    model: model whose variables the checkpoint tracks.
    ema: exponential-moving-average object tracked alongside the model.
    strategy: tf.distribute strategy the checkpoint is created under.
    latest_ckpt: optional checkpoint path to restore from.
    optimizer: optional optimizer to include in the checkpoint; when None
      the checkpoint tracks only the model and EMA.

  Returns:
    The created tf.train.Checkpoint instance.
  """
  ckpt_kwargs = dict(models=model, ema=ema)
  if optimizer is not None:
    ckpt_kwargs['optimizer'] = optimizer
  ckpt_func = functools.partial(train_utils.create_checkpoint, **ckpt_kwargs)
  checkpoint = train_utils.with_strategy(ckpt_func, strategy)
  if latest_ckpt:
    logging.info('Restoring from pretrained directory: %s', latest_ckpt)
    train_utils.with_strategy(lambda: checkpoint.restore(latest_ckpt), strategy)
  return checkpoint
def is_tpu():
  """Returns True when the --accelerator_type flag selects TPU hardware."""
  accelerator = FLAGS.accelerator_type
  return accelerator == 'TPU'
def loss_on_batch(inputs, model, config, training=False):
  """Computes the weighted total loss and per-term scalars for one batch.

  Args:
    inputs: batch dict fed both as model inputs and as targets.
    model: model exposing `get_logits` and `loss`.
    config: training config; `<key>_loss_factor` entries weight each term.
    training: whether the model runs in training mode.

  Returns:
    Tuple of (total_loss, extra_info) where extra_info['scalar'] maps each
    loss term (plus 'total_loss') to its unweighted scalar value.
  """
  logits, aux_output = model.get_logits(
      inputs_dict=inputs, train_config=config, training=training)
  loss, aux_loss_dict = model.loss(
      targets=inputs, logits=logits, train_config=config, training=training,
      aux_output=aux_output)

  # Main loss first, then each auxiliary term, each scaled by its own factor.
  loss_dict = collections.OrderedDict()
  loss_dict['loss'] = loss
  total_loss = config.get('loss_factor', 1.0) * loss
  for key, value in aux_loss_dict.items():
    loss_dict[key] = value
    total_loss += config.get(f'{key}_loss_factor', 1.0) * value
  loss_dict['total_loss'] = total_loss

  extra_info = collections.OrderedDict([('scalar', loss_dict)])
  return total_loss, extra_info
def train_step(config,
               model,
               optimizer,
               metrics,
               ema=None,
               strategy=None):
  """Builds the (optionally distributed) training StepFn.

  Returns a callable that consumes an iterator and runs one optimizer step
  per replica, updating `metrics` and (if given) the EMA shadow variables.
  """
  def step_fn(inputs):
    """Per-Replica StepFn."""
    train_vars = model.trainable_variables
    with tf.GradientTape() as tape:
      loss, extra = loss_on_batch(inputs, model, config, training=True)
      # Gradients are summed across replicas, so scale the loss down here.
      scaled_loss = (
          loss / float(strategy.num_replicas_in_sync) if strategy else loss)

    gradients = tape.gradient(scaled_loss, train_vars)
    optimizer.apply_gradients(zip(gradients, train_vars))

    scalars = extra['scalar']
    for key, metric in metrics.items():
      metric.update_state(scalars[key])

    if ema is not None:
      ema.apply(train_vars)
    return loss

  return train_utils.step_with_strategy(step_fn, strategy)
def build(config, batch_size, is_train=False):
  """Builds model, optimizer and EMA; a dummy forward pass creates variables.

  Args:
    config: config whose `model.name` selects one of 'coltran_core',
      'color_upsampler' or 'spatial_upsampler'.
    batch_size: per-host batch size used for the variable-creating pass.
    is_train: training mode for the dummy pass; also controls the channel
      count of the conditioning slice for the upsamplers.

  Returns:
    Tuple of (model, optimizer, ema).
  """
  optimizer = train_utils.build_optimizer(config)

  downsample = config.get('downsample', False)
  downsample_res = config.get('downsample_res', 64)
  h, w = config.resolution
  model_name = config.model.name

  if model_name == 'coltran_core':
    if downsample:
      h, w = downsample_res, downsample_res
    model = colorizer.ColTranCore(config.model)
    dummy = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
    model(dummy, training=is_train)

  # The conditioning slice has 1 channel during training, 3 otherwise.
  num_channels = 1 if is_train else 3
  if model_name == 'color_upsampler':
    if downsample:
      h, w = downsample_res, downsample_res
    model = upsampler.ColorUpsampler(config.model)
    dummy_slice = tf.zeros((batch_size, h, w, num_channels), dtype=tf.int32)
    dummy = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
    model(dummy, inputs_slice=dummy_slice, training=is_train)
  elif model_name == 'spatial_upsampler':
    model = upsampler.SpatialUpsampler(config.model)
    dummy_slice = tf.zeros((batch_size, h, w, num_channels), dtype=tf.int32)
    dummy = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
    model(dummy, inputs_slice=dummy_slice, training=is_train)

  ema = train_utils.build_ema(config, model.trainable_variables)
  return model, optimizer, ema
###############################################################################
## Train.
###############################################################################
def train(logdir):
  """Runs the training loop: data, model, metrics, checkpointing, summaries."""
  config = FLAGS.config
  steps_per_write = FLAGS.steps_per_summaries
  train_utils.write_config(config, logdir)
  strategy, batch_size = train_utils.setup_strategy(
      config, FLAGS.master,
      FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)

  def input_fn(input_context=None):
    # Shard the read per input pipeline when a distribution context is given.
    read_config = None
    if input_context is not None:
      read_config = tfds.ReadConfig(input_context=input_context)

    dataset = datasets.get_dataset(
        name=FLAGS.dataset,
        config=config,
        batch_size=config.batch_size,
        subset='train',
        read_config=read_config,
        data_dir=FLAGS.data_dir)
    return dataset

  # DATASET CREATION.
  logging.info('Building dataset.')
  train_dataset = train_utils.dataset_with_strategy(input_fn, strategy)
  data_iterator = iter(train_dataset)

  # MODEL BUILDING
  logging.info('Building model.')
  model, optimizer, ema = train_utils.with_strategy(
      lambda: build(config, batch_size, True), strategy)
  model.summary(120, print_fn=logging.info)

  # METRIC CREATION.
  # One Mean metric per loss term; keys must match extra['scalar'] produced
  # by loss_on_batch.
  metrics = {}
  metric_keys = ['loss', 'total_loss']
  metric_keys += model.metric_keys
  for metric_key in metric_keys:
    func = functools.partial(tf.keras.metrics.Mean, metric_key)
    curr_metric = train_utils.with_strategy(func, strategy)
    metrics[metric_key] = curr_metric

  # CHECKPOINTING LOGIC.
  if FLAGS.pretrain_dir is not None:
    pretrain_ckpt = tf.train.latest_checkpoint(FLAGS.pretrain_dir)
    assert pretrain_ckpt

    # Load the entire model without the optimizer from the checkpoints.
    restore_checkpoint(model, ema, strategy, pretrain_ckpt, optimizer=None)
    # New tf.train.Checkpoint instance with a reset optimizer.
    checkpoint = restore_checkpoint(
        model, ema, strategy, latest_ckpt=None, optimizer=optimizer)
  else:
    latest_ckpt = tf.train.latest_checkpoint(logdir)
    checkpoint = restore_checkpoint(
        model, ema, strategy, latest_ckpt, optimizer=optimizer)

  checkpoint = tf.train.CheckpointManager(
      checkpoint, directory=logdir, checkpoint_name='model', max_to_keep=10)
  # Save an initial checkpoint so evaluators have something to restore.
  if optimizer.iterations.numpy() == 0:
    checkpoint_name = checkpoint.save()
    logging.info('Saved checkpoint to %s', checkpoint_name)

  train_summary_dir = os.path.join(logdir, 'train_summaries')
  writer = tf.summary.create_file_writer(train_summary_dir)
  start_time = time.time()

  logging.info('Start Training.')

  # This hack of wrapping up multiple train steps with a tf.function call
  # speeds up training significantly.
  # See: https://www.tensorflow.org/guide/tpu#improving_performance_by_multiple_steps_within_tffunction  # pylint: disable=line-too-long
  @tf.function
  def train_multiple_steps(iterator, steps_per_epoch):
    train_step_f = train_step(config, model, optimizer, metrics, ema,
                              strategy)
    for _ in range(steps_per_epoch):
      train_step_f(iterator)

  while optimizer.iterations.numpy() < config.get('max_train_steps', 1000000):
    num_train_steps = optimizer.iterations

    # Metrics are reset every summary window so results are per-window means.
    for metric_key in metric_keys:
      metrics[metric_key].reset_states()

    start_run = time.time()

    # Passing a tensor (not a Python int) avoids a retrace per call.
    train_multiple_steps(data_iterator, tf.convert_to_tensor(steps_per_write))

    steps_per_sec = steps_per_write / (time.time() - start_run)
    with writer.as_default():
      for metric_key, metric in metrics.items():
        metric_np = metric.result().numpy()
        tf.summary.scalar(metric_key, metric_np, step=num_train_steps)

        if metric_key == 'total_loss':
          logging.info('Loss: %.3f bits/dim, Speed: %.3f steps/second',
                       metric_np, steps_per_sec)

    # Wall-clock based checkpointing, independent of step count.
    if time.time() - start_time > config.save_checkpoint_secs:
      checkpoint_name = checkpoint.save()
      logging.info('Saved checkpoint to %s', checkpoint_name)
      start_time = time.time()
###############################################################################
## Evaluating.
###############################################################################
def evaluate(logdir, subset):
  """Executes the evaluation loop.

  Continuously waits for new checkpoints in `logdir`, evaluates `subset`
  on each one, and writes the resulting metrics as summaries. Exits when
  no new checkpoint appears within the configured wait time.
  """
  config = FLAGS.config
  strategy, batch_size = train_utils.setup_strategy(
      config, FLAGS.master,
      FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)

  def input_fn(_=None):
    # NOTE(review): uses config.dataset here while train() uses FLAGS.dataset;
    # confirm the two stay in sync for custom-dataset runs.
    return datasets.get_dataset(
        name=config.dataset,
        config=config,
        batch_size=config.eval_batch_size,
        subset=subset)

  model, optimizer, ema = train_utils.with_strategy(
      lambda: build(config, batch_size, False), strategy)
  metric_keys = ['loss', 'total_loss']
  # metric_keys += model.metric_keys
  metrics = {}
  for metric_key in metric_keys:
    func = functools.partial(tf.keras.metrics.Mean, metric_key)
    curr_metric = train_utils.with_strategy(func, strategy)
    metrics[metric_key] = curr_metric

  checkpoints = train_utils.with_strategy(
      lambda: train_utils.create_checkpoint(model, optimizer, ema),
      strategy)
  dataset = train_utils.dataset_with_strategy(input_fn, strategy)

  def step_fn(batch):
    # Only the per-term scalars are consumed; the total loss is discarded.
    _, extra = loss_on_batch(batch, model, config, training=False)

    for metric_key in metric_keys:
      curr_metric = metrics[metric_key]
      curr_scalar = extra['scalar'][metric_key]
      curr_metric.update_state(curr_scalar)

  num_examples = config.eval_num_examples
  eval_step = train_utils.step_with_strategy(step_fn, strategy)
  ckpt_path = None
  wait_max = config.get(
      'eval_checkpoint_wait_secs', config.save_checkpoint_secs * 100)
  is_ema = True if ema else False

  eval_summary_dir = os.path.join(
      logdir, 'eval_{}_summaries_pyk_{}'.format(subset, is_ema))
  writer = tf.summary.create_file_writer(eval_summary_dir)

  while True:
    # Blocks until a checkpoint newer than ckpt_path appears or wait_max
    # elapses (returns None on timeout).
    ckpt_path = train_utils.wait_for_checkpoint(logdir, ckpt_path, wait_max)
    logging.info(ckpt_path)
    if ckpt_path is None:
      logging.info('Timed out waiting for checkpoint.')
      break

    train_utils.with_strategy(
        lambda: train_utils.restore(model, checkpoints, logdir, ema),
        strategy)
    data_iterator = iter(dataset)
    num_steps = num_examples // batch_size

    for metric_key, metric in metrics.items():
      metric.reset_states()

    logging.info('Starting evaluation.')
    done = False
    # Outer loop chunks evaluation into summary windows; inner loop steps
    # through one window, stopping early when the dataset is exhausted.
    for i in range(0, num_steps, FLAGS.steps_per_summaries):
      start_run = time.time()
      for k in range(min(num_steps - i, FLAGS.steps_per_summaries)):
        try:
          if k % 10 == 0:
            logging.info('Step: %d', (i + k + 1))
          eval_step(data_iterator)
        except (StopIteration, tf.errors.OutOfRangeError):
          done = True
          break
      if done:
        break
      bits_per_dim = metrics['loss'].result()
      logging.info('Bits/Dim: %.3f, Speed: %.3f seconds/step, Step: %d/%d',
                   bits_per_dim,
                   (time.time() - start_run) / FLAGS.steps_per_summaries,
                   i + k + 1, num_steps)

    # logging.info('Final Bits/Dim: %.3f', bits_per_dim)
    with writer.as_default():
      for metric_key, metric in metrics.items():
        curr_scalar = metric.result().numpy()
        tf.summary.scalar(metric_key, curr_scalar, step=optimizer.iterations)
def main(_):
  """Dispatches to train() or evaluate() based on the --mode flag."""
  logging.info('Logging to %s.', FLAGS.logdir)
  if FLAGS.mode == 'train':
    logging.info('[main] I am the trainer.')
    try:
      train(FLAGS.logdir)
    # During TPU Preemeption, the coordinator hangs with the error below.
    # the exception forces the coordinator to fail, and it will be restarted.
    except (tf.errors.UnavailableError, tf.errors.CancelledError):
      os._exit(os.EX_TEMPFAIL)  # pylint: disable=protected-access
  elif FLAGS.mode.startswith('train'):
    # NOTE(review): this branch appears unreachable — 'train' is caught above
    # and no other value in the mode enum starts with 'train'; confirm before
    # removing.
    logging.info('[main] I am the trainer.')
    train(os.path.join(FLAGS.logdir, FLAGS.mode))
  elif FLAGS.mode == 'eval_train':
    logging.info('[main] I am the training set evaluator.')
    evaluate(FLAGS.logdir, subset='train')
  elif FLAGS.mode == 'eval_valid':
    logging.info('[main] I am the validation set evaluator.')
    evaluate(FLAGS.logdir, subset='valid')
  elif FLAGS.mode == 'eval_test':
    logging.info('[main] I am the test set evaluator.')
    evaluate(FLAGS.logdir, subset='test')
  else:
    raise ValueError(
        'Unknown mode {}. '
        'Must be one of [train, eval_train, eval_valid, eval_test]'.format(
            FLAGS.mode))


if __name__ == '__main__':
  app.run(main)
| 34.940191
| 135
| 0.692503
| 1,912
| 14,605
| 5.065377
| 0.190377
| 0.023748
| 0.011564
| 0.018172
| 0.317914
| 0.299226
| 0.2508
| 0.218379
| 0.179866
| 0.14951
| 0
| 0.005181
| 0.180692
| 14,605
| 417
| 136
| 35.023981
| 0.804195
| 0.106676
| 0
| 0.228956
| 0
| 0.003367
| 0.114146
| 0.007499
| 0
| 0
| 0
| 0
| 0.003367
| 1
| 0.043771
| false
| 0
| 0.057239
| 0.006734
| 0.127946
| 0.006734
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9125319261fb94bc69a897401585fdd40320b1d2
| 25,070
|
py
|
Python
|
train_multi_human.py
|
wenliangdai/sunets-reproduce
|
d92efa80e8314aea153d498cce3c9c6e30c252bd
|
[
"MIT"
] | 2
|
2018-07-02T16:03:07.000Z
|
2018-07-02T16:03:07.000Z
|
train_multi_human.py
|
wenliangdai/sunets-reproduce
|
d92efa80e8314aea153d498cce3c9c6e30c252bd
|
[
"MIT"
] | null | null | null |
train_multi_human.py
|
wenliangdai/sunets-reproduce
|
d92efa80e8314aea153d498cce3c9c6e30c252bd
|
[
"MIT"
] | null | null | null |
import argparse
import math
import os
import pickle
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import lr_scheduler
from torch.utils import data
import torchvision.transforms as transforms
import transforms as extended_transforms
from loss import prediction_stat
from main import get_data_path
from main.loader import get_loader
from main.models import get_model
from utils import dotdict, float2str
# paths
ROOT = '/home/wenlidai/sunets-reproduce/'
RESULT = 'results'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main(args):
    """Multi-task (SBD + LIP) segmentation training entry point.

    Builds loaders, model, a 4-group SGD optimizer with a per-step cosine
    schedule, optionally resumes from a checkpoint, then runs the epoch loop
    of train()/val() while checkpointing and pickling loss/accuracy history.
    """
    print('='*10, 'Starting', '='*10, '\n')
    print(device)

    # Set the seed for reproducing the results
    random.seed(args.manual_seed)
    np.random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.manual_seed)
        cudnn.benchmark = True

    # Set up results folder
    if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_val_images')):
        os.makedirs(os.path.join(ROOT, RESULT, 'saved_val_images'))
    if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_train_images')):
        os.makedirs(os.path.join(ROOT, RESULT, 'saved_train_images'))

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    target_transform = extended_transforms.MaskToTensor()

    traindata = data_loader('train', n_classes=args.n_classes, transform=input_transform, target_transform=target_transform, do_transform=True)
    trainloader = data.DataLoader(traindata, batch_size=args.batch_size, num_workers=2, shuffle=True)
    valdata = data_loader('val', n_classes=args.n_classes, transform=input_transform, target_transform=target_transform)
    valloader = data.DataLoader(valdata, batch_size=args.batch_size, num_workers=2, shuffle=False)

    # n_classes is a pair: [sbd_classes, lip_classes].
    n_classes = traindata.n_classes
    n_trainsamples = len(traindata)
    # Optimizer steps per epoch (iter_size batches per optimizer step).
    n_iters_per_epoch = np.ceil(n_trainsamples / float(args.batch_size * args.iter_size))

    # Setup Model
    model = get_model(
        name=args.arch,
        n_classes=n_classes,
        ignore_index=traindata.ignore_index,
        output_stride=args.output_stride,
        pretrained=args.pretrained,
        momentum_bn=args.momentum_bn,
        dprob=args.dprob
    ).to(device)

    epochs_done=0
    X=[]
    Y1=[]
    Y1_test=[]
    Y2=[]
    Y2_test=[]
    avg_pixel_acc = 0
    mean_class_acc = 0
    mIoU = 0
    avg_pixel_acc_test = 0
    mean_class_acc_test = 0
    mIoU_test = 0
    best_mIoU = 0
    best_epoch = 0

    if args.model_path:
        # Resume: the epoch number is encoded in the checkpoint file name,
        # e.g. "<arch>_<dataset>_<epoch>.pkl".
        model_name = args.model_path.split('.')
        checkpoint_name = model_name[0] + '_optimizer.pkl'
        checkpoint = torch.load(os.path.join(ROOT, RESULT, checkpoint_name))
        optm = checkpoint['optimizer']
        model.load_state_dict(checkpoint['state_dict'])
        split_str = model_name[0].split('_')
        epochs_done = int(split_str[-1])
        saved_loss = pickle.load( open(os.path.join(ROOT, RESULT, "saved_loss.p"), "rb") )
        saved_accuracy = pickle.load( open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "rb") )
        X=saved_loss["X"][:epochs_done]
        # NOTE(review): saved_loss.p is written below with keys "Y1"/"Y2"/
        # "Y1_test"/"Y2_test", so these "Y"/"Y_test" lookups would raise
        # KeyError on resume (and Y/Y_test are never used afterwards) —
        # verify before relying on the resume path.
        Y=saved_loss["Y"][:epochs_done]
        Y_test=saved_loss["Y_test"][:epochs_done]
        # NOTE(review): saved_accuracy.p is written below with keys "P1"/"P2"/
        # "M1"/... rather than "P"/"M"/"I"; same resume-path concern as above.
        # Also, the restored values are ndarrays, so the
        # isinstance(avg_pixel_acc, list) check in the epoch loop takes the
        # reset branch and appears to discard this restored history — verify.
        avg_pixel_acc = saved_accuracy["P"][:epochs_done,:]
        mean_class_acc = saved_accuracy["M"][:epochs_done,:]
        mIoU = saved_accuracy["I"][:epochs_done,:]
        avg_pixel_acc_test = saved_accuracy["P_test"][:epochs_done,:]
        mean_class_acc_test = saved_accuracy["M_test"][:epochs_done,:]
        mIoU_test = saved_accuracy["I_test"][:epochs_done,:]

    if args.best_model_path:
        # Best mIoU/epoch are parsed out of the best-model file name.
        best_model_name = args.best_model_path.split('_')
        best_mIoU = float(best_model_name[-2])
        best_epoch = int(best_model_name[-3])

    # Learning rates: For new layers (such as final layer), we set lr to be 10x the learning rate of layers already trained
    bias_10x_params = filter(lambda x: ('bias' in x[0]) and ('final' in x[0]) and ('conv' in x[0]),
                     model.named_parameters())
    bias_10x_params = list(map(lambda x: x[1], bias_10x_params))

    bias_params = filter(lambda x: ('bias' in x[0]) and ('final' not in x[0]),
                     model.named_parameters())
    bias_params = list(map(lambda x: x[1], bias_params))

    nonbias_10x_params = filter(lambda x: (('bias' not in x[0]) or ('bn' in x[0])) and ('final' in x[0]),
                     model.named_parameters())
    nonbias_10x_params = list(map(lambda x: x[1], nonbias_10x_params))

    nonbias_params = filter(lambda x: ('bias' not in x[0]) and ('final' not in x[0]),
                     model.named_parameters())
    nonbias_params = list(map(lambda x: x[1], nonbias_params))

    optimizer = torch.optim.SGD([{'params': bias_params, 'lr': args.lr},
                                 {'params': bias_10x_params, 'lr': 20 * args.lr if args.pretrained else args.lr},
                                 {'params': nonbias_10x_params, 'lr': 10 * args.lr if args.pretrained else args.lr},
                                 {'params': nonbias_params, 'lr': args.lr},],
                                lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
                                nesterov=(args.optim == 'Nesterov'))
    # Must match the number of param groups above for the LambdaLR lists.
    num_param_groups = 4

    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Setting up scheduler
    if args.model_path and args.restore:
        # Here we restore all states of optimizer
        optimizer.load_state_dict(optm)
        total_iters = n_iters_per_epoch * args.epochs
        # Per-step cosine decay from 1.0 down to 0.0 over total_iters steps.
        lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1]*num_param_groups, last_epoch=epochs_done*n_iters_per_epoch)
        # scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=epochs_done)
    else:
        # scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
        # Here we simply restart the training
        # if args.T0:
        #     total_iters = args.T0 * n_iters_per_epoch
        # else:
        total_iters = ((args.epochs - epochs_done) * n_iters_per_epoch)
        lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1]*num_param_groups)

    # These module-level globals are (re)set each epoch here and accumulated
    # inside train()/val(); index 0 is the SBD task, index 1 is LIP.
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps, steps_test

    # NOTE(review): size_average is deprecated in modern PyTorch; the
    # equivalent is reduction='sum' — confirm against the pinned torch version.
    criterion_sbd = nn.CrossEntropyLoss(size_average=False, ignore_index=traindata.ignore_index)
    criterion_lip = nn.CrossEntropyLoss(size_average=False, ignore_index=traindata.ignore_index)
    criterions = [criterion_sbd, criterion_lip]

    for epoch in range(epochs_done, args.epochs):
        print('='*10, 'Epoch %d' % (epoch + 1), '='*10)
        l_avg = [0, 0]
        totalclasswise_pixel_acc = [0, 0]
        totalclasswise_gtpixels = [0, 0]
        totalclasswise_predpixels = [0, 0]
        l_avg_test = [0, 0]
        totalclasswise_pixel_acc_test = [0, 0]
        totalclasswise_gtpixels_test = [0, 0]
        totalclasswise_predpixels_test = [0, 0]
        steps = [0, 0]
        steps_test = [0, 0]

        # scheduler.step()
        train(model, optimizer, criterions, trainloader, epoch, scheduler, traindata)
        val(model, criterions, valloader, epoch, valdata)

        # save the model every 5 epochs
        if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
            # Keep only the latest rolling checkpoint: delete the one from
            # 5 epochs ago.
            if (epoch + 1) > 5:
                os.remove(os.path.join(ROOT, RESULT, "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch - 4)))
                os.remove(os.path.join(ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset, epoch - 4)))
            torch.save(model, os.path.join(ROOT, RESULT, "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch + 1)))
            torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},
                       os.path.join(ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset, epoch + 1)))

        # remove old loss & accuracy files
        if os.path.isfile(os.path.join(ROOT, RESULT, "saved_loss.p")):
            os.remove(os.path.join(ROOT, RESULT, "saved_loss.p"))
        if os.path.isfile(os.path.join(ROOT, RESULT, "saved_accuracy.p")):
            os.remove(os.path.join(ROOT, RESULT, "saved_accuracy.p"))

        # save train and validation loss
        # Losses are normalized by the number of valid (non-ignored) pixels.
        X.append(epoch + 1)
        Y1.append(l_avg[0] / steps[0])
        Y1_test.append(l_avg_test[0] / steps_test[0])
        Y2.append(l_avg[1] / steps[1])
        Y2_test.append(l_avg_test[1] / steps_test[1])
        saved_loss={"X": X, "Y1": Y1, "Y2": Y2, "Y1_test": Y1_test, "Y2_test": Y2_test}
        pickle.dump(saved_loss, open(os.path.join(ROOT, RESULT, "saved_loss.p"), "wb"))

        # pixel accuracy
        # Reshape per-task accumulators to (1, n_classes) rows for stacking.
        totalclasswise_pixel_acc[0] = totalclasswise_pixel_acc[0].reshape((-1, n_classes[0])).astype(np.float32)
        totalclasswise_gtpixels[0] = totalclasswise_gtpixels[0].reshape((-1, n_classes[0]))
        totalclasswise_predpixels[0] = totalclasswise_predpixels[0].reshape((-1, n_classes[0]))
        totalclasswise_pixel_acc_test[0] = totalclasswise_pixel_acc_test[0].reshape((-1, n_classes[0])).astype(np.float32)
        totalclasswise_gtpixels_test[0] = totalclasswise_gtpixels_test[0].reshape((-1, n_classes[0]))
        totalclasswise_predpixels_test[0] = totalclasswise_predpixels_test[0].reshape((-1, n_classes[0]))
        totalclasswise_pixel_acc[1] = totalclasswise_pixel_acc[1].reshape((-1, n_classes[1])).astype(np.float32)
        totalclasswise_gtpixels[1] = totalclasswise_gtpixels[1].reshape((-1, n_classes[1]))
        totalclasswise_predpixels[1] = totalclasswise_predpixels[1].reshape((-1, n_classes[1]))
        totalclasswise_pixel_acc_test[1] = totalclasswise_pixel_acc_test[1].reshape((-1, n_classes[1])).astype(np.float32)
        totalclasswise_gtpixels_test[1] = totalclasswise_gtpixels_test[1].reshape((-1, n_classes[1]))
        totalclasswise_predpixels_test[1] = totalclasswise_predpixels_test[1].reshape((-1, n_classes[1]))

        # First epoch initializes the per-task history lists; subsequent
        # epochs vstack a new row per epoch.
        if isinstance(avg_pixel_acc, list):
            avg_pixel_acc[0] = np.vstack((avg_pixel_acc[0], np.sum(totalclasswise_pixel_acc[0], axis=1) / np.sum(totalclasswise_gtpixels[0], axis=1)))
            mean_class_acc[0] = np.vstack((mean_class_acc[0], np.mean(totalclasswise_pixel_acc[0] / totalclasswise_gtpixels[0], axis=1)))
            mIoU[0] = np.vstack((mIoU[0], np.mean(totalclasswise_pixel_acc[0] / (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0] - totalclasswise_pixel_acc[0]), axis=1)))

            avg_pixel_acc[1] = np.vstack((avg_pixel_acc[1], np.sum(totalclasswise_pixel_acc[1], axis=1) / np.sum(totalclasswise_gtpixels[1], axis=1)))
            mean_class_acc[1] = np.vstack((mean_class_acc[1], np.mean(totalclasswise_pixel_acc[1] / totalclasswise_gtpixels[1], axis=1)))
            mIoU[1] = np.vstack((mIoU[1], np.mean(totalclasswise_pixel_acc[1] / (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1] - totalclasswise_pixel_acc[1]), axis=1)))

            avg_pixel_acc_test[0] = np.vstack((avg_pixel_acc_test[0], np.sum(totalclasswise_pixel_acc_test[0],axis=1) / np.sum(totalclasswise_gtpixels_test[0], axis=1)))
            mean_class_acc_test[0] = np.vstack((mean_class_acc_test[0], np.mean(totalclasswise_pixel_acc_test[0] / totalclasswise_gtpixels_test[0], axis=1)))
            mIoU_test[0] = np.vstack((mIoU_test[0], np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1)))

            avg_pixel_acc_test[1] = np.vstack((avg_pixel_acc_test[1], np.sum(totalclasswise_pixel_acc_test[1],axis=1) / np.sum(totalclasswise_gtpixels_test[1], axis=1)))
            mean_class_acc_test[1] = np.vstack((mean_class_acc_test[1], np.mean(totalclasswise_pixel_acc_test[1] / totalclasswise_gtpixels_test[1], axis=1)))
            mIoU_test[1] = np.vstack((mIoU_test[1], np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1)))
        else:
            avg_pixel_acc = []
            mean_class_acc = []
            mIoU = []
            avg_pixel_acc.append( np.sum(totalclasswise_pixel_acc[0], axis=1) / np.sum(totalclasswise_gtpixels[0], axis=1) )
            mean_class_acc.append( np.mean(totalclasswise_pixel_acc[0] / totalclasswise_gtpixels[0], axis=1) )
            mIoU.append( np.mean(totalclasswise_pixel_acc[0] / (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0] - totalclasswise_pixel_acc[0]), axis=1) )
            avg_pixel_acc.append( np.sum(totalclasswise_pixel_acc[1], axis=1) / np.sum(totalclasswise_gtpixels[1], axis=1) )
            mean_class_acc.append( np.mean(totalclasswise_pixel_acc[1] / totalclasswise_gtpixels[1], axis=1) )
            mIoU.append( np.mean(totalclasswise_pixel_acc[1] / (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1] - totalclasswise_pixel_acc[1]), axis=1) )

            avg_pixel_acc_test = []
            mean_class_acc_test = []
            mIoU_test = []
            avg_pixel_acc_test.append( np.sum(totalclasswise_pixel_acc_test[0], axis=1) / np.sum(totalclasswise_gtpixels_test[0], axis=1) )
            mean_class_acc_test.append( np.mean(totalclasswise_pixel_acc_test[0] / totalclasswise_gtpixels_test[0], axis=1) )
            mIoU_test.append( np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1) )
            avg_pixel_acc_test.append( np.sum(totalclasswise_pixel_acc_test[1], axis=1) / np.sum(totalclasswise_gtpixels_test[1], axis=1) )
            mean_class_acc_test.append( np.mean(totalclasswise_pixel_acc_test[1] / totalclasswise_gtpixels_test[1], axis=1) )
            mIoU_test.append( np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1) )

        saved_accuracy = {
            "X": X,
            "P1": avg_pixel_acc[0], "P2": avg_pixel_acc[1],
            "M1": mean_class_acc[0], "M2": mean_class_acc[1],
            "I1": mIoU[0], "I2": mIoU[1],
            "P1_test": avg_pixel_acc_test[0], "P2_test": avg_pixel_acc_test[1],
            "M1_test": mean_class_acc_test[0], "M2_test": mean_class_acc_test[1],
            "I1_test": mIoU_test[0], "I2_test": mIoU_test[1]
        }
        pickle.dump(saved_accuracy, open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "wb"))

        # print validation mIoU of both tasks
        this_mIoU1 = np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1)[0]
        this_mIoU2 = np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1)[0]
        print('Val: mIoU_sbd = {}, mIoU_lip = {}'.format(this_mIoU1, this_mIoU2))
def train(model, optimizer, criterions, trainloader, epoch, scheduler, data):
    """Runs one training epoch over both tasks (index 0 = SBD, 1 = LIP),
    accumulating pixel-normalized losses and class-wise stats into the
    module-level globals set up by main()."""
    global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
    global steps

    model.train()

    for i, (images, sbd_labels, lip_labels) in enumerate(trainloader):
        # Number of non-ignored target pixels; used to normalize summed losses.
        sbd_valid_pixel = float( (sbd_labels.data != criterions[0].ignore_index).long().sum() )
        lip_valid_pixel = float( (lip_labels.data != criterions[1].ignore_index).long().sum() )

        images = images.to(device)
        sbd_labels = sbd_labels.to(device)
        lip_labels = lip_labels.to(device)

        # task=2 runs both heads and returns one output per task.
        sbd_outputs, lip_outputs = model(images, task=2)

        # --- SBD branch: loss + class-wise pixel statistics. ---
        sbd_loss = criterions[0](sbd_outputs, sbd_labels)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([sbd_outputs], sbd_labels, data.n_classes[0])
        classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
        classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
        classwise_predpixels = torch.FloatTensor([classwise_predpixels])
        totalclasswise_pixel_acc[0] += classwise_pixel_acc.sum(0).data.numpy()
        totalclasswise_gtpixels[0] += classwise_gtpixels.sum(0).data.numpy()
        totalclasswise_predpixels[0] += classwise_predpixels.sum(0).data.numpy()
        sbd_total_loss = sbd_loss.sum()
        sbd_total_loss = sbd_total_loss / float(sbd_valid_pixel)
        # retain_graph=True so the LIP backward below can reuse the part of
        # the graph shared with this loss — presumably the common trunk;
        # TODO(review): confirm both heads share features.
        sbd_total_loss.backward(retain_graph=True)

        # --- LIP branch: same pattern, gradients accumulate onto the trunk. ---
        lip_loss = criterions[1](lip_outputs, lip_labels)
        classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([lip_outputs], lip_labels, data.n_classes[1])
        classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
        classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
        classwise_predpixels = torch.FloatTensor([classwise_predpixels])
        totalclasswise_pixel_acc[1] += classwise_pixel_acc.sum(0).data.numpy()
        totalclasswise_gtpixels[1] += classwise_gtpixels.sum(0).data.numpy()
        totalclasswise_predpixels[1] += classwise_predpixels.sum(0).data.numpy()
        lip_total_loss = lip_loss.sum()
        lip_total_loss = lip_total_loss / float(lip_valid_pixel)
        lip_total_loss.backward()

        # Track un-normalized loss sums and valid-pixel counts; main()
        # divides l_avg by steps to log the per-pixel loss.
        l_avg[0] += sbd_loss.sum().data.cpu().numpy()
        steps[0] += sbd_valid_pixel
        l_avg[1] += lip_loss.sum().data.cpu().numpy()
        steps[1] += lip_valid_pixel

        optimizer.step()
        optimizer.zero_grad()
        # Cosine schedule advances once per iteration (its lambda is a
        # function of the step count, not the epoch).
        scheduler.step()

        # if (i + 1) % args.log_size == 0:
        #     pickle.dump(images[0].cpu().numpy(),
        #                 open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_input.p"), "wb"))
        #     pickle.dump(np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
        #                 open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_output.p"), "wb"))
        #     pickle.dump(np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
        #                 open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_target.p"), "wb"))
def val(model, criterions, valloader, epoch, data):
    """Run one validation pass over the joint SBD + LIP validation set.

    Accumulates summed losses and per-class pixel statistics into the
    module-level ``*_test`` counters (slot 0 = SBD, slot 1 = LIP) without
    computing gradients or touching optimizer state.

    Args:
        model: multi-task network; ``model(images, task=2)`` returns
            ``(sbd_outputs, lip_outputs)`` — TODO confirm against model def.
        criterions: pair of losses, ``criterions[0]`` for SBD and
            ``criterions[1]`` for LIP; each exposes ``ignore_index``.
        valloader: iterable of ``(images, sbd_labels, lip_labels)`` batches.
        epoch: current epoch index (only used by the disabled image dumps).
        data: dataset object; ``data.n_classes[task]`` gives the class count.
    """
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test
    model.eval()
    for i, (images, sbd_labels, lip_labels) in enumerate(valloader):
        # Number of non-ignored pixels per task; used to normalize the
        # accumulated loss into a per-pixel average by the caller.
        sbd_valid_pixel = float( (sbd_labels.data != criterions[0].ignore_index).long().sum() )
        lip_valid_pixel = float( (lip_labels.data != criterions[1].ignore_index).long().sum() )
        images = images.to(device)
        sbd_labels = sbd_labels.to(device)
        lip_labels = lip_labels.to(device)
        with torch.no_grad():
            # task=2 runs both segmentation heads in a single forward pass.
            sbd_outputs, lip_outputs = model(images, task=2)
            sbd_loss = criterions[0](sbd_outputs, sbd_labels)
            lip_loss = criterions[1](lip_outputs, lip_labels)
            # SBD per-class statistics: correctly classified pixels,
            # ground-truth pixels and predicted pixels per class.
            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([sbd_outputs], sbd_labels, data.n_classes[0])
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
            classwise_predpixels = torch.FloatTensor([classwise_predpixels])
            totalclasswise_pixel_acc_test[0] += classwise_pixel_acc.sum(0).data.numpy()
            totalclasswise_gtpixels_test[0] += classwise_gtpixels.sum(0).data.numpy()
            totalclasswise_predpixels_test[0] += classwise_predpixels.sum(0).data.numpy()
            # LIP per-class statistics (same bookkeeping, slot 1).
            classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([lip_outputs], lip_labels, data.n_classes[1])
            classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
            classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
            classwise_predpixels = torch.FloatTensor([classwise_predpixels])
            totalclasswise_pixel_acc_test[1] += classwise_pixel_acc.sum(0).data.numpy()
            totalclasswise_gtpixels_test[1] += classwise_gtpixels.sum(0).data.numpy()
            totalclasswise_predpixels_test[1] += classwise_predpixels.sum(0).data.numpy()
            # Unnormalized loss sums and valid-pixel counts per task.
            l_avg_test[0] += sbd_loss.sum().data.cpu().numpy()
            steps_test[0] += sbd_valid_pixel
            l_avg_test[1] += lip_loss.sum().data.cpu().numpy()
            steps_test[1] += lip_valid_pixel
        # if (i + 1) % 800 == 0:
        #     pickle.dump(images[0].cpu().numpy(),
        #                 open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_input.p"), "wb"))
        #     pickle.dump(np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
        #                 open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_output.p"), "wb"))
        #     pickle.dump(np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
        #                 open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_target.p"), "wb"))
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters, derive the result
    # directory name, then hand off to main().
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--arch', nargs='?', type=str, default='sunet64_multi',
                        help='Architecture to use [\'sunet64, sunet128, sunet7128 etc\']')
    parser.add_argument('--model_path', help='Path to the saved model', type=str)
    parser.add_argument('--best_model_path', help='Path to the saved best model', type=str)
    parser.add_argument('--dataset', nargs='?', type=str, default='human',
                        help='Dataset to use [\'sbd, coco, cityscapes etc\']')
    parser.add_argument('--img_rows', nargs='?', type=int, default=512,
                        help='Height of the input image')
    parser.add_argument('--img_cols', nargs='?', type=int, default=512,
                        help='Width of the input image')
    parser.add_argument('--epochs', nargs='?', type=int, default=90,
                        help='# of the epochs')
    parser.add_argument('--batch_size', nargs='?', type=int, default=10,
                        help='Batch Size')
    parser.add_argument('--lr', nargs='?', type=float, default=0.0005,
                        help='Learning Rate')
    parser.add_argument('--manual_seed', default=0, type=int,
                        help='manual seed')
    parser.add_argument('--iter_size', type=int, default=1,
                        help='number of batches per weight updates')
    parser.add_argument('--log_size', type=int, default=400,
                        help='iteration period of logging segmented images')
    parser.add_argument('--dprob', nargs='?', type=float, default=1e-7,
                        help='Dropout probability')
    parser.add_argument('--momentum', nargs='?', type=float, default=0.95,
                        help='Momentum for SGD')
    parser.add_argument('--momentum_bn', nargs='?', type=float, default=0.01,
                        help='Momentum for BN')
    parser.add_argument('--weight_decay', nargs='?', type=float, default=1e-4,
                        help='Weight decay')
    parser.add_argument('--output_stride', nargs='?', type=str, default='16',
                        help='Output stride to use [\'32, 16, 8 etc\']')
    parser.add_argument('--freeze', action='store_true',
                        help='Freeze BN params')
    parser.add_argument('--restore', action='store_true',
                        help='Restore Optimizer params')
    # Bug fix: this flag was declared type=str with an int default, so a
    # value supplied on the command line arrived as a string while the
    # default stayed an int. It is an iteration count, so parse as int.
    parser.add_argument('--epoch_log_size', nargs='?', type=int, default=20,
                        help='Every [epoch_log_size] iterations to print loss in each epoch')
    parser.add_argument('--pretrained', action='store_true',
                        help='Use pretrained ImageNet initialization or not')
    parser.add_argument('--n_classes', nargs='?', type=int, action='append',
                        help='number of classes of the labels')
    parser.add_argument('--optim', nargs='?', type=str, default='SGD',
                        help='Optimizer to use [\'SGD, Nesterov etc\']')

    # NOTE: the original `global args` here was a no-op — `global` has no
    # effect at module scope; this assignment alone creates the module-level
    # name that other functions reference.
    args = parser.parse_args()

    # Tag the results directory with architecture/dataset (and pretraining).
    RESULT = '{}_{}_{}'.format(RESULT, args.arch, args.dataset)
    if args.pretrained:
        RESULT = RESULT + '_pretrained'

    main(args)
| 55.835189
| 210
| 0.657359
| 3,311
| 25,070
| 4.704017
| 0.098762
| 0.047769
| 0.076276
| 0.048411
| 0.657785
| 0.608411
| 0.574767
| 0.538299
| 0.506196
| 0.451043
| 0
| 0.025395
| 0.206781
| 25,070
| 448
| 211
| 55.959821
| 0.75782
| 0.081611
| 0
| 0.129032
| 0
| 0
| 0.065947
| 0.003306
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008798
| false
| 0
| 0.055718
| 0
| 0.064516
| 0.014663
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9125a2258a5cbeeafce52644773c51a924d107ac
| 392
|
py
|
Python
|
exemplos/exemplo-aula-14-01.py
|
quitaiskiluisf/TI4F-2021-LogicaProgramacao
|
d12e5c389a43c98f27726df5618fe529183329a8
|
[
"Unlicense"
] | null | null | null |
exemplos/exemplo-aula-14-01.py
|
quitaiskiluisf/TI4F-2021-LogicaProgramacao
|
d12e5c389a43c98f27726df5618fe529183329a8
|
[
"Unlicense"
] | null | null | null |
exemplos/exemplo-aula-14-01.py
|
quitaiskiluisf/TI4F-2021-LogicaProgramacao
|
d12e5c389a43c98f27726df5618fe529183329a8
|
[
"Unlicense"
] | null | null | null |
# Banner
print('Programa para somar 8 valores utilizando vetores/listas')
print()

# Collect the eight values, prompting once per entry.
valores = [int(input('Informe o valor: ')) for _ in range(8)]

# Total them with the built-in accumulator.
soma = sum(valores)

# Show the result.
print(f'A soma dos valores é {soma}')
| 20.631579
| 64
| 0.67602
| 64
| 392
| 4.140625
| 0.546875
| 0.05283
| 0.067925
| 0.075472
| 0.188679
| 0.188679
| 0.030189
| 0
| 0
| 0
| 0
| 0.031447
| 0.188776
| 392
| 18
| 65
| 21.777778
| 0.801887
| 0.229592
| 0
| 0.222222
| 0
| 0
| 0.334459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9125c9f61c337477b68228ea1ba426e48ce06b1a
| 333
|
py
|
Python
|
day3/p1.py
|
pwicks86/adventofcode2015
|
fba7cc8f6942f43f5b0226a0ac70365630f14cbd
|
[
"MIT"
] | null | null | null |
day3/p1.py
|
pwicks86/adventofcode2015
|
fba7cc8f6942f43f5b0226a0ac70365630f14cbd
|
[
"MIT"
] | null | null | null |
day3/p1.py
|
pwicks86/adventofcode2015
|
fba7cc8f6942f43f5b0226a0ac70365630f14cbd
|
[
"MIT"
] | null | null | null |
from collections import defaultdict

# Movement deltas per instruction character. Any other character (e.g. a
# trailing newline in the input file) maps to (0, 0) — no move — which
# matches the original chain of independent `if` tests.
_MOVES = {"<": (-1, 0), ">": (1, 0), "v": (0, 1), "^": (0, -1)}


def count_houses(directions):
    """Return the number of distinct grid houses visited.

    Santa starts at (0, 0), which always counts as visited, and takes one
    grid step per character of *directions*.
    """
    houses = defaultdict(int, {(0, 0): 1})
    x = y = 0
    for c in directions:
        dx, dy = _MOVES.get(c, (0, 0))
        x += dx
        y += dy
        houses[(x, y)] += 1
    return len(houses.keys())


if __name__ == "__main__":
    # Bug fix: the original leaked the file handle; `with` closes it.
    with open("input.txt") as f:
        print(count_houses(f.read()))
| 18.5
| 35
| 0.456456
| 53
| 333
| 2.867925
| 0.45283
| 0.078947
| 0.118421
| 0.092105
| 0.125
| 0.125
| 0.125
| 0
| 0
| 0
| 0
| 0.061135
| 0.312312
| 333
| 17
| 36
| 19.588235
| 0.60262
| 0
| 0
| 0
| 0
| 0
| 0.039039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91266dc2fa03da47339e3882e71342b1ee45462b
| 2,326
|
py
|
Python
|
pbr/config/blend_config.py
|
NUbots/NUpbr
|
49b0d2abd15512a93bfe21157269288c9ec4c54d
|
[
"MIT"
] | 1
|
2019-03-25T04:37:06.000Z
|
2019-03-25T04:37:06.000Z
|
pbr/config/blend_config.py
|
NUbots/NUpbr
|
49b0d2abd15512a93bfe21157269288c9ec4c54d
|
[
"MIT"
] | 3
|
2020-07-24T11:55:48.000Z
|
2022-02-20T20:49:17.000Z
|
pbr/config/blend_config.py
|
NUbots/NUpbr
|
49b0d2abd15512a93bfe21157269288c9ec4c54d
|
[
"MIT"
] | null | null | null |
# Blender-specific Configuration Settings
from math import pi

# Cycles render settings: engine/device, output resolution, sample counts,
# light-path bounce limits and tiling/threading performance options.
render = {
    "render_engine": "CYCLES",
    "render": {"cycles_device": "GPU"},
    "dimensions": {"resolution": [1280, 1024], "percentage": 100.0},
    "sampling": {"cycles_samples": 256, "cycles_preview_samples": 16},
    "light_paths": {
        "transparency": {"max_bounces": 1, "min_bounces": 1},
        "bounces": {"max_bounces": 1, "min_bounces": 1},
        "diffuse": 1,
        "glossy": 1,
        "transmission": 1,
        "volume": 0,
        "reflective_caustics": False,
        "refractive_caustics": False,
    },
    "performance": {
        "render_tile": [512, 512],
        "threads": {"mode": "FIXED", "num_threads": 8},
    },
    "layers": {"use_hair": False},
}

# Scene-wide unit system.
scene = {"units": {"length_units": "METRIC", "rotation_units": "DEGREES"}}

# Render-layer options.
layers = {"denoising": {"use_denoising": False}}

# Field material: texture mapping transform, grass colour mixes, noise/HSV
# shader inputs and principled-BSDF parameters, plus the lower plane.
field = {
    "material": {
        "mapping": {
            "translation": (0.0, 0.05, 0.0),
            "rotation": (0.0, -pi / 2.0, 0.0),  # -90 degrees about Y
            "scale": (1.0, 0.6, 1.0),
        },
        "mix_lower_grass": {
            "inp1": (0.000, 0.012, 0.00076, 1.0),
            "inp2": (0.020, 0.011, 0.0, 1.0),
        },
        "mix_upper_grass": {
            "inp1": (0.247, 0.549, 0.0, 1),
            "inp2": (0.257, 0.272, 0.0, 1),
        },
        "noise": {"inp": [5.0, 2.0, 0.0]},
        "hsv": {"inp": [0.0, 0.0, 1.9, 1.0]},
        "mix_up_grass_hsv": {"inp0": 0.455},
        "mix_low_grass_field_lines": {"inp0": 0.4},
        "mix_grass": {"inp0": 0.391},
        "principled": {"specular": 0.225, "roughness": 0.625},
    },
    "lower_plane": {
        "colour": (0.003, 0.04, 0.0, 1.0),
        "principled": {"specular": 0.225, "roughness": 1.0},
        "mapping": {"scale": (0.1, 0.1, 1.0)},
    },
}

# Ball mesh/material: UV-sphere construction, surface shading, subdivision.
ball = {
    "initial_cond": {"segments": 16, "ring_count": 10, "calc_uvs": True},
    "material": {"metallic": 0.0, "roughness": 0.35},
    "subsurf_mod": {"levels": 1, "rend_levels": 4},
}

# Goal geometry/material: post cylinders, corner curve fill, shading.
goal = {
    "initial_cond": {"vertices": 32, "calc_uvs": True},
    "corner_curve": {"fill": "FULL"},
    "material": {"metallic": 0.0, "roughness": 0.35, "colour": (0.8, 0.8, 0.8, 1.0)},
    "subsurf_mod": {"levels": 1, "rend_levels": 4},
}

# Robot surface material.
robot = {"material": {"specular": 0.742, "metallic": 0.0, "roughness": 0.9}}
| 31.432432
| 85
| 0.503439
| 296
| 2,326
| 3.817568
| 0.429054
| 0.033628
| 0.013274
| 0.050442
| 0.214159
| 0.141593
| 0.102655
| 0
| 0
| 0
| 0
| 0.115362
| 0.258383
| 2,326
| 73
| 86
| 31.863014
| 0.53971
| 0.016767
| 0
| 0.03125
| 0
| 0
| 0.377243
| 0.020569
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015625
| 0
| 0.015625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
912692288f987cd8f54127db16d2b577edc80fc1
| 7,022
|
py
|
Python
|
simglucose/controller/basal_bolus_ctrller.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
simglucose/controller/basal_bolus_ctrller.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
simglucose/controller/basal_bolus_ctrller.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm

logger = logging.getLogger(__name__)

# CSV files with per-patient "quest" parameters (CR, CF, TDI, Age) and the
# full virtual-patient parameter table; paths are relative to the process
# working directory.
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'

# Lightweight bundle of controller parameters: basal rate, correction
# factor (cf) and carbohydrate ratio (cr).
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
    """
    This is a Basal-Bolus Controller that is typically practiced by a Type-1
    Diabetes patient. The performance of this controller can serve as a
    baseline when developing a more advanced controller.
    """
    def __init__(self, target=140):
        # Per-patient quest parameters and virtual-patient physiology
        # tables, loaded once from the package CSV files.
        self.quest = pd.read_csv(CONTROL_QUEST)
        self.patient_params = pd.read_csv(PATIENT_PARA_FILE)
        self.target = target  # target blood-glucose level

    def policy(self, observation, reward, done, **kwargs):
        """Return an Action for the current CGM reading.

        Expects 'sample_time', 'patient_name' and 'meal' in kwargs; reward
        and done are accepted for interface compatibility but unused.
        """
        sample_time = kwargs.get('sample_time', 1)
        pname = kwargs.get('patient_name')
        meal = kwargs.get('meal')  # unit: g/min
        action = self._bb_policy(pname, meal, observation.CGM, sample_time)
        return action

    def _bb_policy(self, name, meal, glucose, env_sample_time):
        """
        Helper function to compute the basal and bolus amount.

        The basal insulin is based on the insulin amount to keep the blood
        glucose in the steady state when there is no (meal) disturbance.
               basal = u2ss (pmol/(L*kg)) * body_weight (kg) / 6000 (U/min)

        The bolus amount is computed based on the current glucose level, the
        target glucose level, the patient's correction factor and the patient's
        carbohydrate ratio.
               bolus = ((carbohydrate / carbohydrate_ratio) +
                       (current_glucose - target_glucose) / correction_factor)
                       / sample_time

        NOTE the bolus computed from the above formula is in unit U. The
        simulator only accepts insulin rate. Hence the bolus is converted to
        insulin rate.
        """
        if any(self.quest.Name.str.match(name)):
            # Known patient: pull their personal quest and physiology rows.
            quest = self.quest[self.quest.Name.str.match(name)]
            params = self.patient_params[self.patient_params.Name.str.match(
                name)]
            u2ss = params.u2ss.values.item()  # unit: pmol/(L*kg)
            BW = params.BW.values.item()  # unit: kg
        else:
            # Unknown patient: fall back to population-average parameters.
            quest = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
                                 columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
            u2ss = 1.43  # unit: pmol/(L*kg)
            BW = 57.0  # unit: kg

        basal = u2ss * BW / 6000  # unit: U/min
        if meal > 0:
            logger.info('Calculating bolus ...')
            logger.info(f'Meal = {meal} g/min')
            logger.info(f'glucose = {glucose}')
            # Carb bolus plus a hyperglycemia correction that only applies
            # above 150 mg/dL (the (glucose > 150) factor gates it).
            bolus = (
                (meal * env_sample_time) / quest.CR.values + (glucose > 150) *
                (glucose - self.target) / quest.CF.values).item()  # unit: U
        else:
            bolus = 0  # unit: U

        # This is to convert bolus in total amount (U) to insulin rate (U/min).
        # The simulation environment does not treat basal and bolus
        # differently. The unit of Action.basal and Action.bolus are the same
        # (U/min).
        bolus = bolus / env_sample_time  # unit: U/min
        return Action(basal=basal, bolus=bolus)

    def reset(self):
        # Stateless controller: nothing to reset.
        pass
class ManualBBController(Controller):
    """Basal-Bolus controller driven by user-supplied parameters.

    Unlike BBController it reads nothing from disk: target glucose,
    carbohydrate ratio (cr), correction factor (cf) and basal rate are all
    given explicitly, and the hyper/hypo corrections can be toggled.
    """

    def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
                 corrected=True, use_low_lim=False, low_lim=70):
        super().__init__(self)
        self.target = target
        # Remember the original settings so adjust()/reset() can rebuild
        # the working values from them.
        self.orig_cr = self.cr = cr
        self.orig_cf = self.cf = cf
        self.orig_basal = self.basal = basal
        self.sample_rate = sample_rate
        self.use_cf = use_cf          # apply hyperglycemia correction
        self.use_bol = use_bol        # apply carbohydrate bolus
        self.cooldown = cooldown      # minutes between CF corrections
        self.last_cf = np.inf         # time since the last CF correction
        self.corrected = corrected
        self.use_low_lim = use_low_lim  # apply hypoglycemia reduction
        self.low_lim = low_lim

    def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
        """Shift the working CR/CF/basal parameters by the given deltas."""
        self.cr += cr_incr
        self.cf += cf_incr
        self.basal += basal_incr

    def policy(self, observation, reward, done, **kwargs):
        """Controller interface entry point; delegates to manual_bb_policy."""
        carbs = kwargs.get('carbs')
        glucose = kwargs.get('glucose')
        action = self.manual_bb_policy(carbs, glucose)
        return action

    def manual_bb_policy(self, carbs, glucose, log=False):
        """Compute the bolus for one step and return an Action.

        When log is True also returns the individual correction terms
        (hyper, hypo, carb) for diagnostics.
        """
        if carbs > 0:
            if self.corrected:
                carb_correct = carbs / self.cr
            else:
                # assuming carbs are already multiplied by sampling rate
                carb_correct = (carbs / self.sample_rate) / self.cr

            # Corrections are gated by boolean factors: zero unless glucose
            # is above target / below the low limit respectively.
            hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
            hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf

            bolus = 0
            if self.use_low_lim:
                bolus -= hypo_correct
            if self.use_cf:
                # Only correct hyperglycemia if the cooldown has elapsed.
                if self.last_cf > self.cooldown and hyper_correct > 0:
                    bolus += hyper_correct
                    self.last_cf = 0
            if self.use_bol:
                bolus += carb_correct
            bolus = bolus / self.sample_rate  # total U -> U/min
        else:
            bolus = 0
            carb_correct = 0
            hyper_correct = 0
            hypo_correct = 0

        self.last_cf += self.sample_rate
        if log:
            return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
        else:
            return Action(basal=self.basal, bolus=bolus)

    def get_params(self):
        """Return the current (basal, cf, cr) as a ParamTup."""
        return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)

    def adjust(self, basal_adj, cr_adj):
        """Re-derive the working basal/CR from the original values.

        Bug fix: basal previously used ``+=``, so every call accumulated
        ``orig_basal`` on top of the current value; it now assigns from the
        original, mirroring the CR update and reset() semantics.
        """
        self.basal = self.orig_basal + basal_adj
        self.cr = self.orig_cr * cr_adj

    def reset(self):
        """Restore all working parameters to their constructor values."""
        self.cr = self.orig_cr
        self.cf = self.orig_cf
        self.basal = self.orig_basal
        self.last_cf = np.inf
def bb_test(bbc, env, n_days, seed, full_save=False):
    """Roll out controller *bbc* in *env* for n_days simulated days.

    Seeds the sensor/scenario/patient random streams, then steps the
    environment 288 times per day (5-minute resolution), feeding the
    controller noisy, occasionally missed carbohydrate announcements.

    Returns:
        (history, patient_state_array) when full_save is True; otherwise
        a dict {'hist': history} with the first day (288 steps) dropped.
    """
    env.seeds['sensor'] = seed
    env.seeds['scenario'] = seed
    env.seeds['patient'] = seed
    env.reset()
    full_patient_state = []
    # Carb-counting error model: zero-mean multiplicative Gaussian noise,
    # plus a small chance the meal is missed entirely.
    carb_error_mean = 0
    carb_error_std = 0.2
    carb_miss_prob = 0.05
    # Prime the loop with a no-meal action at a nominal 140 mg/dL.
    action = bbc.manual_bb_policy(carbs=0, glucose=140)
    for _ in tqdm(range(n_days*288)):  # 288 five-minute steps per day
        obs, reward, done, info = env.step(action=action.basal+action.bolus)
        bg = env.env.CGM_hist[-1]
        carbs = info['meal']
        if np.random.uniform() < carb_miss_prob:
            carbs = 0
        err = np.random.normal(carb_error_mean, carb_error_std)
        carbs = carbs + carbs * err
        action = bbc.manual_bb_policy(carbs=carbs, glucose=bg)
        full_patient_state.append(info['patient_state'])
    full_patient_state = np.stack(full_patient_state)
    if full_save:
        return env.env.show_history(), full_patient_state
    else:
        # Drop the first simulated day as warm-up.
        return {'hist': env.env.show_history()[288:]}
| 38.582418
| 99
| 0.602108
| 929
| 7,022
| 4.386437
| 0.234661
| 0.013252
| 0.01227
| 0.011779
| 0.11681
| 0.063313
| 0.037301
| 0
| 0
| 0
| 0
| 0.014783
| 0.296782
| 7,022
| 182
| 100
| 38.582418
| 0.81045
| 0.182569
| 0
| 0.139706
| 0
| 0
| 0.043393
| 0.011476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0.007353
| 0.051471
| 0.007353
| 0.213235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
912788fe05c2b0029d03454b315f2758ce890c5a
| 6,025
|
py
|
Python
|
ceilometer/event/trait_plugins.py
|
redhat-openstack/ceilometer
|
9e503d7068889e52e9144079de331ed51676e535
|
[
"Apache-2.0"
] | 1
|
2016-03-10T06:55:45.000Z
|
2016-03-10T06:55:45.000Z
|
ceilometer/event/trait_plugins.py
|
redhat-openstack/ceilometer
|
9e503d7068889e52e9144079de331ed51676e535
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/event/trait_plugins.py
|
redhat-openstack/ceilometer
|
9e503d7068889e52e9144079de331ed51676e535
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
    """Base class for plugins.

    It converts notification fields to Trait values.
    """

    def __init__(self, **kw):
        """Setup the trait plugin.

        For each Trait definition a plugin is used on in a conversion
        definition, a new instance of the plugin will be created, and
        initialized with the parameters (if any) specified in the
        config file.

        :param kw: the parameters specified in the event definitions file.
        """
        super(TraitPluginBase, self).__init__()

    @abc.abstractmethod
    def trait_value(self, match_list):
        """Convert a set of fields to a Trait value.

        This method is called each time a trait is attempted to be extracted
        from a notification. It will be called *even if* no matching fields
        are found in the notification (in that case, the match_list will be
        empty). If this method returns None, the trait *will not* be added to
        the event. Any other value returned by this method will be used as
        the value for the trait. Values returned will be coerced to the
        appropriate type for the trait.

        :param match_list: A list (may be empty if no matches) of *tuples*.
          Each tuple is (field_path, value) where field_path is the jsonpath
          for that specific field.

        Example::

            trait's fields definition: ['payload.foobar',
                                        'payload.baz',
                                        'payload.thing.*']
            notification body:
                    {
                     'message_id': '12345',
                     'publisher': 'someservice.host',
                     'payload': {
                                 'foobar': 'test',
                                 'thing': {
                                           'bar': 12,
                                           'boing': 13,
                                          }
                                }
                    }
            match_list will be: [('payload.foobar','test'),
                                 ('payload.thing.bar',12),
                                 ('payload.thing.boing',13)]

        Here is a plugin that emulates the default (no plugin) behavior:

        .. code-block:: python

          class DefaultPlugin(TraitPluginBase):
              "Plugin that returns the first field value."

              def __init__(self, **kw):
                  super(DefaultPlugin, self).__init__()

              def trait_value(self, match_list):
                  if not match_list:
                      return None
                  return match_list[0][1]
        """
class SplitterTraitPlugin(TraitPluginBase):
    """Plugin that splits a piece off of a string value."""

    def __init__(self, separator=".", segment=0, max_split=None, **kw):
        """Setup how do split the field.

        :param separator: String to split on. default "."
        :param segment: Which segment to return. (int) default 0
        :param max_split: Limit number of splits. Default: None (no limit)
        """
        self.separator = separator
        self.segment = segment
        self.max_split = max_split
        super(SplitterTraitPlugin, self).__init__(**kw)

    def trait_value(self, match_list):
        """Return the configured segment of the first matched field value."""
        if not match_list:
            return None
        text = six.text_type(match_list[0][1])
        if self.max_split is None:
            pieces = text.split(self.separator)
        else:
            pieces = text.split(self.separator, self.max_split)
        try:
            return pieces[self.segment]
        except IndexError:
            # Fewer segments than requested: report no value.
            return None
class BitfieldTraitPlugin(TraitPluginBase):
    """Plugin to set flags on a bitfield."""

    def __init__(self, initial_bitfield=0, flags=None, **kw):
        """Setup bitfield trait.

        :param initial_bitfield: (int) initial value for the bitfield
                                 Flags that are set will be OR'ed with this.
        :param flags: List of dictionaries defining bitflags to set depending
                      on data in the notification. Each one has the following
                      keys:
                            path: jsonpath of field to match.
                            bit: (int) number of bit to set (lsb is bit 0)
                            value: set bit if corresponding field's value
                                   matches this. If value is not provided,
                                   bit will be set if the field exists (and
                                   is non-null), regardless of it's value.
        """
        self.initial_bitfield = initial_bitfield
        self.flags = [] if flags is None else flags
        super(BitfieldTraitPlugin, self).__init__(**kw)

    def trait_value(self, match_list):
        """OR together the bits whose flag definitions match the fields."""
        matched = dict(match_list)
        result = self.initial_bitfield
        for spec in self.flags:
            field = spec['path']
            if field not in matched:
                continue
            # With no expected 'value', the field's presence sets the bit.
            if 'value' in spec and matched[field] != spec['value']:
                continue
            result |= 2 ** int(spec['bit'])
        return result
| 37.42236
| 77
| 0.550539
| 685
| 6,025
| 4.748905
| 0.308029
| 0.0332
| 0.013526
| 0.020904
| 0.070704
| 0.052874
| 0.044882
| 0.044882
| 0.044882
| 0.030741
| 0
| 0.007966
| 0.374938
| 6,025
| 160
| 78
| 37.65625
| 0.855815
| 0.616266
| 0
| 0.195652
| 0
| 0
| 0.010274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.043478
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
912c4617e4d0718d34c2b278ca0d1aef755136f4
| 59,222
|
py
|
Python
|
nonlinear/aorta/nonlinearCasesCreation_aorta.py
|
HaolinCMU/Soft_tissue_tracking
|
8592b87066ddec84a3aefc18240303cb085cf34c
|
[
"MIT"
] | 3
|
2020-08-25T05:10:34.000Z
|
2020-09-18T01:50:33.000Z
|
nonlinear/aorta/nonlinearCasesCreation_aorta.py
|
HaolinCMU/Soft_tissue_tracking
|
8592b87066ddec84a3aefc18240303cb085cf34c
|
[
"MIT"
] | null | null | null |
nonlinear/aorta/nonlinearCasesCreation_aorta.py
|
HaolinCMU/Soft_tissue_tracking
|
8592b87066ddec84a3aefc18240303cb085cf34c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 13:08:16 2020
@author: haolinl
"""
import copy
import os
import time
import numpy as np
import random
import scipy.io # For extracting data from .mat file
class inputFileGenerator(object):
"""
Generate input file for Abaqus.
Unit system:
Length: m
Force: N
Pressure: Pa
"""
def __init__(self, data_file_name, write_path, material_type, fix_indices_list, node_variable_name, elem_variable_name, user_prescribed_force_field=[]):
"""
Initialize parameters.
Parameters:
----------
data_file_name: String.
The file path of information of node, element, etc.
write_path: String.
The path to write the inp file.
material_type: String.
The type of material.
Used to indicate whether to consider material nonlinearity.
fix_indices_list: List of ints.
The node indices to be fixed.
node_variable_name: String.
The variable name of the nodes matrix in the data file.
elem_variable_name: String.
The variable name of the elements matrix in the data file.
user_prescribed_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: nSurfI x 3.
Default: [].
"""
# Data & Variables.
self.data_file_name = data_file_name
self.data_mat = scipy.io.loadmat(self.data_file_name)
self._surface_mat = self.data_mat["FaceI"]
self._surface_nodes = self.data_mat["idxSurfI"]
self._surface_nodes_num = self.data_mat["nSurfI"][0,0]
self._outer_surface_regionNum = 22 # Int. The region number of outer surface.
self._outer_surface_nodes_list = self._extractOuterSurfaceNodes(self.data_mat["faces"], self._outer_surface_regionNum) # List of sorted ints. The indices of outer surface nodes. Indexed from 1.
self._outer_surface_nodes_num = len(self._outer_surface_nodes_list)
self._triangle_nodes_list = []
self._coupled_list = []
self._node_variable_name = node_variable_name
self._elem_variable_name = elem_variable_name
self._inputFile_lines_total = []
self.writePath = write_path
self._modulus = 1e7 # Young's modulus. Unit: Pa. Default: 1e7.
self._poisson_ratio = 0.48 # Poisson's ratio. Linear elastic default: 0.3; neo-Hookean default: 0.48.
self._isCoupleOn = False # Boolean. True: use coupling constraint; False: do not use coupling constraint. Must not turn on if applying Laplacian smoothing.
self._coupling_type = "Kinematic" # String. "Kinematic" / "Distributing".
self._coupling_neighbor_layers = 1 # How deep does the neighborhood searching go. Default: 1.
self._isLaplacianSmoothingOn = True # Boolean. True: use laplacian smoothing. False: do not use laplacian smoothing.
self._laplacian_variable_name = "laplacianMatrixI3"
self._massMatrix_variable_name = "massMatrixI3"
self._laplacian_iter_num = 20 # Default: 3.
self._smoothing_rate = 0.1 # Default: 0.1 (Previous: 1e-4).
self.loads_num = 3 # For initial testing.
self._load_sampling_style = "gaussian" # String. Indicating the type of random sampling for force components. "uniform" / "gaussian".
self._load_scale = (0.0, 10.0) # Absolute range of the force for uniform sampling. Case and BC specific. (min, max). Unit: N.
self._gaussian_params = (4.0, 0.8) # Mean and deviation of the force for Gaussian sampling. Case and BC specific. (mean, deviation). Unit: N.
self._load_params_tuple = None
self._initial_force_component_vector = [] # List of floats. Default: []. Example: [5., 5., 5.].
self.autoIncrementNum = 5000 # Int. The maximum increment number of the AutoSolver.
self.initIncrem = 0.001 # Float. The initial length of the increment (for fixed-step, this is also the length per increm).
self.minIncrem = 1e-20 # Float. The minimum increment length for the AutoSolver (ueless for the StaticSolver).
self.maxIncrem = 1.0 # Float. The maximum increment length for the AutoSolver (useless for the StaticSovler).
self.totalTime = 1.0 # Float. The total time for one simulation step.
self.frameNum = 1 # Int. The number of frames intending to extract from the nodal file.
# ================== Load sampling variables ================== #
if self._isCoupleOn: self._couple_region_num = self.loads_num
else: self._couple_region_num = 0
if self._load_sampling_style == "gaussian": self._load_params_tuple = self._gaussian_params
elif self._load_sampling_style == "uniform": self._load_params_tuple = self._load_scale
else:
self._load_sampling_style = "uniform"
self._load_params_tuple = self._load_scale
# ============================================================= #
# Header.
self._header = ["*Heading"]
# Part definition.
self._part_name = "part-1"
self._material_name = "tissue"
self._part_initial = ["*Part, name={}".format(self._part_name)] # Total list of Part definition.
self._node = ["*Node"]
self._elem = ["*Element, type=C3D10"] # Nonlinear tetrahedron. http://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node33.html#tennode.
self._nset_all = []
self._elset_all = []
self._section = ["*Solid Section, elset=allElems, material={}".format(self._material_name),
","]
self._part_end = ["*End Part"]
self._new_node_list = []
self._new_node_dict = {}
self._node_num = None
self._orig_node_num = None
self._elem_num = None
self._part = self.generatePart()
# Load settings.
self._loads_nset_name_list = []
self._rf_name_list = []
self._rf_nset_name_list = []
self._rf_nsets = []
self._load_nsets = [] # Nset definition of loads.
self._load = self.generateLoadSetting()
# Assembly definition.
self._assembly_name = "assembly-1"
self._instance_name = "instance-1"
self._assembly_initial = ["*Assembly, name={}".format(self._assembly_name)] # Total list of Assembly definition.
self._instance = ["*Instance, name={}, part={}".format(self._instance_name, self._part_name),
"*End Instance"]
self._ref_nodes_list = []
self._fix_nset_name = "fix"
self._fix_indices_list = fix_indices_list
self._fix_nset = self.generateNset(self._fix_indices_list, self._fix_nset_name, self._instance_name) # Nset definition of fix BC.
self._loads_posi_indices_list = self._generateLoadPositions(self.loads_num, self._fix_indices_list) # Generate load positions. Randomly. For fixed mode: style="fix", input_posi_indices_list=[415, 470, 107].
self._laplacian_initial_loads_posi = None # List. Containing the original position of concentrated forces.
self._laplacian_force_field = None # 2D Array of floats. Size: nSurfI * 3. The force field on the outer surface.
self._user_prescribed_force_field = user_prescribed_force_field # List of floats. Size: nSurfI * 3. The prescribed force field on the outer surface. Default: [].
self._surface_list = []
self._coupling_list = []
self._nset_boundary = [] # All nsets definitions in assembly. Boundary conditions
self._assembly_end = ["*End Assembly"]
self._assembly = self.generateAssembly()
# Material.
self.material_type = material_type # String. Indicate material type. "linear"/"neo_hookean_fitting"/"neo_hookean_solid".
self._material_def_file_name = "" # Default: "". If there is a file of stress strain definition, please specify here (must not be "").
self._material = self.generateMaterial(self.material_type)
# Boundary condition.
self._boundary_initial = ["*Boundary"]
self._boundary = self.generateBoundaryCondition_fixAll()
# Step settings.
self.freq = int(self.autoIncrementNum / self.frameNum) # Int. The data frame extraction frequency (also refers to the number of increments. Extract one frame per "self.freq" increments). Especially for StaticSolver case.
self._step = ["*Step, name=step-1, nlgeom=YES, inc={}".format(self.autoIncrementNum),
"*Static",
"{}, {}, {}, {}".format(self.initIncrem, self.totalTime,
self.minIncrem, self.maxIncrem)] # Auto solver.
self._step_end = ["*End Step"]
# Rest settings.
self._restart = ["*Restart, write, frequency=0"]
self._output = ["*Output, field, variable=PRESELECT",
"*Output, history, variable=PRESELECT"]
self._fil = ["*FILE FORMAT, ASCII",
"*node file, frequency={}".format(self.freq),
"U, COORD",
"*El file, frequency={}".format(self.freq),
"S, COORD"]
self._resSettings = self._restart + self._output + self._fil
def readFile(self, read_path):
    """
    Read a text file and return its lines.

    Parameters:
    ----------
    read_path: String.
        Path of the original inp file.
    Returns:
    ----------
    List of strings.
        The lines of the file, without trailing newline characters.
    """
    with open(read_path, "rt") as file_handle:
        return file_handle.read().splitlines()
def writeFile(self, write_status):
    """
    Assemble "self._inputFile_lines_total" and write it to "self.writePath".

    Fix: the original duplicated the join-and-write logic in both branches;
    the selection of lines and the file write are now separated (behavior
    unchanged).

    Parameters:
    ----------
    write_status: String.
        "Normal" / "Fast".
        "Normal": generate all definitions;
        "Fast": generate nodes and elements definition only.
        Any other value falls back to "Normal".
    """
    if write_status == "Fast":
        # Nodes and elements only.
        self._inputFile_lines_total = self._header + self._part
    elif write_status == "Normal":
        # Full input deck: part, assembly, material, BC, step, loads, outputs.
        self._inputFile_lines_total = (self._header + self._part + self._assembly +
                                       self._material + self._boundary + self._step +
                                       self._load + self._resSettings + self._step_end)
    else:
        # Unrecognized status: delegate to the full "Normal" generation.
        self.writeFile("Normal")
        return
    content = '\n'.join(self._inputFile_lines_total)
    with open(self.writePath, 'w') as f:
        f.write(content)
def generatePart(self):
    """
    Generate the part definition.

    Returns:
    ----------
    The list collection of all sub-definition lists, including:
        part_initial: header part of "Part definition".
        node: Node definition.
        elem: Element definition.
        elset_all: The elset containing all elements. For material definition specifically.
        section: Section definition.
        part_end: The endline of "Part definition".
    """
    # Fill self._node / self._elem from the modeling data matrix.
    self.generateNodes(self.data_mat[self._node_variable_name], self._node)
    self.generateElements(self.data_mat[self._elem_variable_name], self._elem)
    # Upgrade linear tetrahedra to quadratic (inserts mid-edge nodes); must run
    # after nodes/elements exist and before the all-element elset is counted.
    self.nonlinearization()
    # Generate all element elset.
    # self._elem[1:] skips row 0, which holds the "*Element" header line;
    # element labels are therefore 1..len(self._elem)-1.
    allElem_list, allElem_list_name = [], "allElems"
    for i in range(len(self._elem[1:])): allElem_list.append(str(i+1))
    self._elset_all = self.generateElset(allElem_list, allElem_list_name)
    # Generate Section.
    self._section = self.generateSection(allElem_list_name, self._material_name)
    # Collection.
    return (self._part_initial + self._node + self._elem + self._elset_all +
            self._section + self._part_end)
def generateNodes(self, node_mat, target_node_list, specified_indices_list=None):
    """
    Generate node definition lines and append them to "target_node_list".

    Fix: the original signature used a mutable default argument ([]), which is
    shared across calls; None is the safe equivalent. An explicit empty list is
    still accepted and treated as "unspecified" (backward compatible).

    Parameters:
    ----------
    node_mat: 2D Array of floats.
        The matrix containing the coordinates of the nodes to-be-defined
        under "*Node". One row per node.
    target_node_list: List of strings.
        The node definition list. Mutated in place.
    specified_indices_list (optional): List of ints.
        The labels of the nodes, following the exact row order of node_mat.
        Default: None (nodes are numbered 1..node_mat.shape[0]).
    """
    for i in range(node_mat.shape[0]):
        if specified_indices_list:
            index_field = "{}".format(specified_indices_list[i])
        else:
            index_field = "{}".format(i + 1)
        coord_fields = [str(coord) for coord in list(node_mat[i, :])]
        target_node_list.append(', '.join([index_field] + coord_fields))
def _extractOuterSurfaceNodes(self, faces_def_matrix, outer_surface_regionNum):
"""
Extract the nodes on the outer surface of the geometry (for force application in next step).
Parameters:
----------
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
outer_surface_nodes_list: List of ints.
The indices of nodes on the outer surface. Indexed from 1. Sorted.
"""
outer_surface_nodes_list = []
for i in range(faces_def_matrix.shape[0]):
if faces_def_matrix[i,0] == outer_surface_regionNum: # The region number of outer surface.
outer_surface_nodes_list += [int(ind) for ind in faces_def_matrix[i,1:]] # Indexed from 1.
outer_surface_nodes_list = list(set(outer_surface_nodes_list))
outer_surface_nodes_list.sort()
return outer_surface_nodes_list
def generateElements(self, elem_mat, target_elem_list, specified_indices_list=None):
    """
    Generate element definition lines and append them to "target_elem_list".

    Fix: the original signature used a mutable default argument ([]), which is
    shared across calls; None is the safe equivalent. An explicit empty list is
    still accepted and treated as "unspecified" (backward compatible).

    Parameters:
    ----------
    elem_mat: 2D Array of ints.
        The matrix containing the node indices of each element to-be-defined
        under "*Element". One row per element.
    target_elem_list: List of strings.
        The element definition list. Mutated in place.
    specified_indices_list (optional): List of ints.
        The labels of the elements, following the exact row order of elem_mat.
        Default: None (elements are numbered 1..elem_mat.shape[0]).
    """
    for i in range(elem_mat.shape[0]):
        if specified_indices_list:
            label_field = "{}".format(specified_indices_list[i])
        else:
            label_field = "{}".format(i + 1)
        node_fields = [str(ind) for ind in list(elem_mat[i, :])]
        # Swap the 2nd and 3rd nodes so the tetrahedron is defined
        # counter-clockwise; a clockwise ordering yields negative element volume.
        node_fields[1], node_fields[2] = node_fields[2], node_fields[1]
        target_elem_list.append(', '.join([label_field] + node_fields))
def generateNset(self, node_list, nset_name, instance_name=None):
    """
    Generate a node set ("*Nset") definition.

    Fix: the original always appended the leftover line buffer after the loop,
    which produced a spurious empty line whenever len(node_list) was a multiple
    of 10 (and for empty input). The chunked emission below only writes
    non-empty data lines.

    Parameters:
    ----------
    node_list: List of ints.
        The nodes to be contained in the node set.
    nset_name: String.
        The name of the to-be-defined node set.
    instance_name (optional): String.
        The name of the specified instance. Only used in assembly definitions.
        Default: None. (Part cases)
    Returns:
    ----------
    nset: List of strings.
        The nset definition: a header line followed by node indices,
        at most 10 per data line.
    """
    if instance_name is None:
        nset = ["*Nset, nset={}".format(nset_name)]
    else:
        nset = ["*Nset, nset={}, instance={}".format(nset_name, instance_name)]
    for chunk_start in range(0, len(node_list), 10):
        chunk = node_list[chunk_start:chunk_start + 10]
        nset.append(', '.join(str(ind) for ind in chunk))
    return nset
def generateElset(self, elem_list, elset_name, instance_name=None):
    """
    Generate an element set ("*Elset") definition.

    Fix: the original always appended the leftover line buffer after the loop,
    which produced a spurious empty line whenever len(elem_list) was a multiple
    of 10 (and for empty input). The chunked emission below only writes
    non-empty data lines.

    Parameters:
    ----------
    elem_list: List of ints.
        The elements to be contained in the element set.
    elset_name: String.
        The name of the to-be-defined element set.
    instance_name (optional): String.
        The name of the specified instance. Only used in assembly definitions.
        Default: None. (Part cases)
    Returns:
    ----------
    elset: List of strings.
        The elset definition: a header line followed by element indices,
        at most 10 per data line.
    """
    if instance_name is None:
        elset = ["*Elset, elset={}".format(elset_name)]
    else:
        elset = ["*Elset, elset={}, instance={}".format(elset_name, instance_name)]
    for chunk_start in range(0, len(elem_list), 10):
        chunk = elem_list[chunk_start:chunk_start + 10]
        elset.append(', '.join(str(ind) for ind in chunk))
    return elset
def generateSection(self, elset_name, material_name):
    """
    Generate a solid section definition binding an elset to a material.

    Parameters:
    ----------
    elset_name: String.
        The name of the elset to be assigned a section.
    material_name: String.
        The name of a defined material.
    Returns:
    ----------
    List of strings.
        The "*Solid Section" definition (header plus its empty data line).
    """
    header = "*Solid Section, elset={}, material={}".format(elset_name, material_name)
    return [header, ","]
def generateMaterial(self, material_type):
    """
    Generate the material definition lines for the requested material model.

    Parameters:
    ----------
    material_type: String.
        "linear" / "neo_hookean_solid" / "neo_hookean_fitting".
        Any other value falls back to "linear".
    Returns:
    ----------
    material_lines: List of strings.
        The lines of the material definition.
    """
    material_lines = ["*Material, name={}".format(self._material_name)]
    if material_type == "linear":
        material_lines.append("*Elastic")
        material_lines.append("{}, {}".format(self._modulus, self._poisson_ratio))
    elif material_type == "neo_hookean_solid":
        # Closed-form neo-Hookean coefficients from (E, nu).
        c10 = self._modulus / (4 * (1 + self._poisson_ratio))
        d1 = 6 * (1 - 2 * self._poisson_ratio) / self._modulus
        material_lines.append("*Hyperelastic, neo hooke")
        material_lines.append("{}, {}".format(c10, d1))
    elif material_type == "neo_hookean_fitting":
        # Fit the neo-Hookean model from (imported or generated) stress-strain data.
        stress_strain_lines = self._generateNeoHookeanFitting(self._modulus, (-0.3, 0.3), file_name=self._material_def_file_name)
        material_lines.append("*Hyperelastic, neo hooke, test data input, poisson={}".format(self._poisson_ratio))
        material_lines.append("*Uniaxial Test Data")
        material_lines.extend(stress_strain_lines)
    else:
        material_lines = self.generateMaterial("linear")
    return material_lines
def _generateNeoHookeanFitting(self, modulus, strain_range, file_name=""):
"""
Import/Generate stress strain data for neo-Hookean material fitting.
Parameters:
----------
modulus: Float.
The elastic modulus of material.
strain_range: Tuple of floats.
Range for strain interpolation.
file_name (optional): String.
The name of stress strain data definition file.
Default: "".
Returns:
----------
stress_strain_lines: List of strings.
The lines of stress strain data.
"""
if file_name != "": return self.readFile(file_name)
else:
"""
Assumptions of neo-Hookean formulation:
Incompressible (Poisson's ratio = ~0.5, small deformation).
Undergoing uniaxial loading.
Formulation: sigma = 2*C*(stretch - 1/(stretch^2)).
E = 6*C.
"""
strain_data = np.linspace(strain_range[0], strain_range[1], 100)
stretch_data = strain_data + 1.0
stress_data = (self._modulus / 3.0) * (stretch_data - 1.0 / stretch_data**2) # Formulation.
stress_strain_lines = []
for i in range(len(stress_data)):
stress_strain_lines.append("%.6f, %.6f" % (stress_data[i], strain_data[i]))
return stress_strain_lines
def _generateLoadPositions(self, loads_num, fix_indices_list, style="random", input_posi_indices_list=[]):
"""
Randomly generate positions of the load.
Parameters:
----------
loads_num: Int.
Number of loads.
fix_indices_list: List of ints.
Indices of fixed nodes.
style (optional): String.
Indicate how to generate initial load positions.
"random" / "fix":
"random": Randomly generate load positions.
"fix": Use the user input of initial load position indices.
Default: "random".
input_posi_indices_list (optional): List of ints.
User input of initial load positions indices list.
Indexed from 1.
Default: [].
Returns:
----------
loads_posi_indices_list: List of ints.
Picked indices for load application positions.
"""
if style == "random":
loads_posi_indices_list = []
for i in range(loads_num):
while(True):
load_posi_index_temp = random.choice(self._outer_surface_nodes_list) # Randomly chosen an outer surface node to apply load F(x, y, z). Indexed from 1.
if load_posi_index_temp not in fix_indices_list: break # The randomly generated index cannot be one of the fixed nodes.
loads_posi_indices_list.append(load_posi_index_temp)
return loads_posi_indices_list
elif style == "fix": return input_posi_indices_list
else: return self._generateLoadPositions(loads_num, fix_indices_list)
def _generateLoadValues(self, output_dimension, load_scale, sampling_style="uniform"):
"""
Randomly generate force values for load component definition.
Using function: numpy.random.rand().
Parameters:
----------
output_dimension: Tuple of ints.
The shape of output random array.
Size: 2*1. (dim1, dim2).
load_scale: Tuple of floats.
Size: 2*1. (min_laod, max_laod) / (mean, deviation).
sampling_style (optional): String.
Indicating the type of sampling.
"uniform": uniform distribution.
"gaussian": Gaussian distribution.
Default: "uniform".
Returns:
----------
load_result: Array of floats.
Size: output_dimension.
"""
if sampling_style == "uniform":
load_result = (np.random.rand(output_dimension[0], output_dimension[1]) * 2 - 1) * abs(load_scale[1] - load_scale[0])
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if load_value_temp < 0: load_result[index] -= self._load_scale[0]
else: load_result[index] += self._load_scale[0]
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
elif sampling_style == "gaussian":
mean, deviation = load_scale[0], load_scale[1]
load_result = np.random.normal(mean, deviation, size=output_dimension)
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if np.random.rand() <= 0.5: load_result[index] *= -1
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
else: load_result = self._generateLoadValues(output_dimension, load_scale)
return load_result
def generateAssembly(self):
    """
    Generate the assembly definition.

    Depending on the configured mode, this either builds reference points plus
    coupling constraints (self._isCoupleOn), or a Laplacian-smoothed surface
    force field (self._isLaplacianSmoothingOn), or plain per-node load nsets.
    Mutates many instance attributes (load/rf nsets, load positions, the
    concentrated load definition self._load, etc.) as a side effect.

    Returns:
    ----------
    The list collection of all sub-definition lists, including:
        assembly_initial: Header of the assembly definition.
        instance: The instance definition.
        nset_boundary: The definition of BC-related node sets.
        assembly_end: The endline of the assembly definition.
    """
    # Generate "self.loads_num" nsets, each of which has 1 node.
    if self._isCoupleOn:
        for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
            ref_name_temp = "rf-{}".format(i+1)
            ref_nset_name_temp = "rf-{}-nset".format(i+1)
            self._rf_name_list.append(ref_name_temp)
            self._rf_nset_name_list.append(ref_nset_name_temp)
            # Generate assembly node definitions for reference points.
            # The reference point is placed at the coordinates of the load node
            # (parsed from its "index, x, y, z" definition line).
            ref_node_list_temp = ["*Node"]
            ref_pt_coord_list_temp = [float(item) for item in self._node[load_posi_index_temp].split(',')[1:]]
            self.generateNodes(np.array(ref_pt_coord_list_temp).astype(float).reshape(1,-1), ref_node_list_temp,
                               specified_indices_list=[i+1])
            self._ref_nodes_list += copy.deepcopy(ref_node_list_temp)
            # Neighborhood of the load node over which the coupling distributes.
            rf_nset_list_temp = self._findCouplingNodes(load_posi_index_temp, self._coupling_neighbor_layers)
            # Generate reference point node sets.
            self._load_nsets += self.generateNset([i+1], ref_name_temp)
            # Generate coupling constraint node sets.
            self._rf_nsets += self.generateNset(rf_nset_list_temp, ref_nset_name_temp,
                                                self._instance_name)
        self.generateCoupling()
    else:
        if self._isLaplacianSmoothingOn:
            # Full-field force vector: 3 DOFs per surface node, zero everywhere
            # except at the picked load nodes.
            force_vector_temp = np.zeros(shape=(3*self._surface_nodes_num, 1))
            self._laplacian_initial_loads_posi = copy.deepcopy(self._loads_posi_indices_list)
            if self._initial_force_component_vector == []:
                # Sample a random 3-component force per load node.
                for load_posi_index_temp in self._loads_posi_indices_list:
                    force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = self._generateLoadValues((3,1), self._load_params_tuple,
                                                                                                                     sampling_style=self._load_sampling_style)
            else:
                # Use the user-provided initial force components at every load node.
                for load_posi_index_temp in self._loads_posi_indices_list:
                    force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = np.array(self._initial_force_component_vector).astype(float).reshape(3,1)
            laplacian_matrix, mass_matrix = self.data_mat[self._laplacian_variable_name], self.data_mat[self._massMatrix_variable_name]
            # Zero out DOFs of nodes not on the outer surface before smoothing.
            laplacian_matrix = self._laplacianMatrixShrink(laplacian_matrix, self._surface_nodes, self.data_mat["faces"], self._outer_surface_regionNum)
            force_vector_new = self._laplacianSmoothing(force_vector_temp, laplacian_matrix, mass_matrix, iter_num=self._laplacian_iter_num,
                                                        smoothing_rate=self._smoothing_rate, laplacian_force_field=self._user_prescribed_force_field) # Size: (nSurfI x 3)*1. Fix force value: initial_BC_state="fix" (not recommended).
            self._laplacian_force_field = force_vector_new.reshape(-1,3)
            # Recover the (1-based) node indices that carry a non-zero force after smoothing.
            # NOTE(review): list.index(item) returns the FIRST occurrence, so two nodes with
            # identical force components would map to the same index — confirm values are distinct.
            self._loads_posi_indices_list = copy.deepcopy([(list(force_vector_new).index(item)//3)+1 for item in list(force_vector_new) if item != 0]) # Indexed from 1.
            self._loads_posi_indices_list = list(set(self._loads_posi_indices_list))
            self._loads_posi_indices_list.sort()
            for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
                load_nset_name_temp = "Load-{}".format(i+1)
                self._loads_nset_name_list.append(load_nset_name_temp)
                self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
            # Keep an nset recording the pre-smoothing load positions.
            self._load_nsets += self.generateNset(self._laplacian_initial_loads_posi, "Orig_loads_posi", self._instance_name)
            self._load = self.generateLoadSetting(force_list=list(force_vector_new.reshape(-1,1)))
        else:
            # Plain mode: one single-node nset per concentrated load.
            for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
                load_nset_name_temp = "Load-{}".format(i+1)
                self._loads_nset_name_list.append(load_nset_name_temp)
                self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
    # Concatenate assembly subparts.
    self._nset_boundary = self._nset_boundary + self._load_nsets + self._rf_nsets + self._fix_nset + self._surface_list + self._coupling_list
    return (self._assembly_initial + self._instance + self._ref_nodes_list + self._nset_boundary + self._assembly_end)
def generateCoupling(self):
    """
    Generate the node-based surfaces and coupling constraints that tie each
    reference point to its neighborhood node set (for concentrated force
    application).
    """
    for index, rf_nset_name in enumerate(self._rf_nset_name_list):
        constraint_name = self._rf_name_list[index]
        # Internal node-based surface wrapping the coupled node set.
        self._surface_list += ["*Surface, type=NODE, name={}_CNS_, internal".format(rf_nset_name),
                               "{}, 1.".format(rf_nset_name)]
        # Coupling constraint: reference node <-> node-based surface.
        self._coupling_list += ["*Coupling, constraint name={}, ref node={}, surface={}_CNS_".format(constraint_name,
                                                                                                     constraint_name,
                                                                                                     rf_nset_name),
                                "*{}".format(self._coupling_type)]
def _findCouplingNodes(self, rf_node_ind, neighbor_layers):
"""
Find the immediate neighbors of each specified node index.
Parameters:
----------
rf_node_ind: Int.
The index of target node.
Returns:
----------
rf_nset_list: List of ints (duplicated items removed).
"rf_node_ind"'s corresponding immediate neighbor nodes set.
"""
rf_nset_list, new_nodes_list, searched_nodes_list = [rf_node_ind], [rf_node_ind], []
for j in range(neighbor_layers):
for ind_temp in new_nodes_list:
for i in range(len(self._triangle_nodes_list)):
if ind_temp in self._triangle_nodes_list[i]:
rf_nset_list += copy.deepcopy(self._triangle_nodes_list[i])
else: continue
searched_nodes_list += copy.deepcopy(new_nodes_list)
rf_nset_list = list(set(copy.deepcopy(rf_nset_list)))
new_nodes_list = [ind for ind in rf_nset_list if ind not in searched_nodes_list]
# Avoid assigning same nodes to different coupled node sets.
for ind in rf_nset_list:
if ind in self._coupled_list: rf_nset_list.remove(ind)
else: self._coupled_list.append(ind)
return rf_nset_list
def generateBoundaryCondition_fixAll(self):
    """
    Generate a fully-fixed boundary condition on the fix node set.

    Returns:
    ----------
    List of strings.
        The "*Boundary" header lines followed by one line per constrained
        DOF (1-6: three displacements + three rotations).
    """
    fixed_dof_lines = ["{}, {}, {}".format(self._fix_nset_name, dof, dof)
                       for dof in range(1, 7)]
    return self._boundary_initial + fixed_dof_lines
def generateLoadSetting(self, force_list=[]):
    """
    Generate the concentrated-force ("*Cload") definition lines.

    Parameters:
    ----------
    force_list (optional): List of floats.
        Flattened force components; reshaped internally to (-1, 3).
        Default: [] (a fresh random force vector of size loads_num*3 is sampled).
    Returns:
    ----------
    load_list: List of strings.
        The "*Cload" blocks, one per reference point (coupled mode) or per
        load node set (uncoupled mode).
    """
    load_list = []
    if force_list == []:
        force_list = list(self._generateLoadValues((self.loads_num*3, 1), self._load_params_tuple, sampling_style=self._load_sampling_style))
    force_list = np.array(force_list).astype(float).reshape(-1,3) # 2D Array of floats. One row of (Fx, Fy, Fz) per node.
    if self._isCoupleOn:
        for j, rf_name in enumerate(self._rf_name_list): # Length: self._loads_num
            load_temp = ["*Cload, op=NEW"]
            for i in range(force_list.shape[1]): # 3: Three directions.
                load_temp.append("{}, {}, {}".format(rf_name, i+1, force_list[j,i]))
            load_list += copy.deepcopy(load_temp)
    else:
        for j, load_name in enumerate(self._loads_nset_name_list): # Length: length of self._loads_nset_name_list.
            load_temp = ["*Cload"]
            for i in range(force_list.shape[1]): # 3: Three directions.
                # NOTE(review): rows are addressed by the 1-based node index, which assumes
                # force_list spans the full surface force field (the Laplacian path). The
                # internally-sampled default is only loads_num rows — confirm this branch is
                # never reached with the default, or indexing goes out of range.
                load_temp.append("{}, {}, {}".format(load_name, i+1, force_list[self._loads_posi_indices_list[j]-1,i]))
            load_list += copy.deepcopy(load_temp)
    return load_list
def _laplacianMatrixShrink(self, laplacian_matrix, surface_nodes_list, faces_def_matrix, outer_surface_regionNum):
"""
Assign zeros to the DOFs without force value applied.
Parameters:
----------
laplacian_matrix: 2D Array of floats.
The surface's Laplacian for force smoothing.
Size: nSurfI*3 x nSurfI*3.
surface_nodes_list: List of ints.
All indices of nodes on all surfaces.
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
laplacian_matrix: 2D Array of floats.
Laplacian with zeros assigned to the nodes not on the outer surfaces.
Size: nSurfI*3 x nSurfI*3.
"""
surface_nodes_list = [ind for ind in surface_nodes_list]
outer_surface_nodes_list = self._extractOuterSurfaceNodes(faces_def_matrix, outer_surface_regionNum)
other_surface_nodes_list = [ind for ind in surface_nodes_list if ind not in outer_surface_nodes_list]
other_surface_nodes_list.sort()
for ind in other_surface_nodes_list:
laplacian_matrix[surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3,:] = 0.0
laplacian_matrix[:,surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3] = 0.0
return laplacian_matrix
def _laplacianSmoothing(self, force_vector, laplacian_matrix, mass_matrix, iter_num=3, smoothing_rate=1e-4, initial_BC_state="", laplacian_force_field=[]):
"""
Implement laplacian smoothing based on pre-calculated Laplacian matrix.
Formulation: Forward Euler.
F_(n+1) = (I + lambda*massMatrix*Laplacian) * F_n
Parameters:
----------
force_vector: 1D Array of floats.
With concentrated force values applied at the specidied nodes.
Size: (self._surface_nodes_num x 3) * 1.
laplacian_matrix: 2D Array of floats.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
mass_matrix: 2D Array of floats.
Diagonal matrix.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
iter_num (optional): Int.
The number of smoothing iterations.
Default: 3.
smoothing_rate (optional): float.
The coefficient that control the step size of smoothing.
Default: 1e-4.
initial_BC_state (optional): String.
Indicating whether to "fix" or "decay" the original concentrated force value.
Default: "". Indicating smoothing including the original forces.
laplacian_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: self._surface_nodes_num x 3.
Default: [].
Returns:
----------
force_vector_new: 1D Array of floats.
The laplacian-smoothed force vector.
Size: (self._surface_nodes_num x 3) * 1.
"""
if laplacian_force_field == []:
force_vector_new = copy.deepcopy(force_vector)
for i in range(iter_num):
force_vector_new += smoothing_rate * (laplacian_matrix @ force_vector_new) # Without mass matrix.
# force_vector_new += smoothing_rate * (mass_matrix @ laplacian_matrix @ force_vector_new) # With mass matrix (NOT recommended).
if initial_BC_state == "fix":
for j, value in enumerate(force_vector):
if value != 0:
force_vector_new[j] = value
else: force_vector_new = np.array(laplacian_force_field).astype(float).reshape(len(laplacian_force_field),1)
return force_vector_new
def _computeMidPoint(self, ind_1, ind_2):
    """
    Compute (or look up) the mid-point node of the edge (ind_1, ind_2).

    The result is memoized in "self._new_node_dict" under both "a_b" and
    "b_a" keys, so an edge shared by several elements/triangles is only
    ever assigned a single mid-point node.

    Parameters:
    ----------
    ind_1: Int.
        The first index of the node pair. Indexed from 1.
    ind_2: Int.
        The second index of the node pair. Indexed from 1.
    Returns:
    ----------
    ind_mid: Int.
        The index of the mid-point node in self._node. Indexed from 1.
    """
    key_string_temp_1, key_string_temp_2 = "{}_{}".format(ind_1, ind_2), "{}_{}".format(ind_2, ind_1)
    if key_string_temp_1 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_1]
    elif key_string_temp_2 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_2]
    else:
        # Parse the two endpoint coordinates from their "index, x, y, z" lines.
        coord_temp_1 = np.array(self._node[ind_1].split(',')[1:]).astype(float).reshape(1,-1)
        coord_temp_2 = np.array(self._node[ind_2].split(',')[1:]).astype(float).reshape(1,-1)
        coord_temp_mid = (coord_temp_1 + coord_temp_2) / 2.0
        coord_mid_list = [str(item) for item in list(coord_temp_mid[0])]
        # New node index = len(self._node): row 0 of self._node appears to be the
        # "*Node" header, so the list length equals (largest node index + 1) — TODO confirm.
        self._node_num = len(self._node)
        new_node_def_list_temp = copy.deepcopy([str(self._node_num)])
        new_node_def_list_temp += copy.deepcopy(coord_mid_list)
        self._node.append(', '.join(new_node_def_list_temp))
        self._new_node_list.append(', '.join(new_node_def_list_temp))
        # Memoize under both orientations of the edge key.
        self._new_node_dict[key_string_temp_1] = self._node_num
        self._new_node_dict[key_string_temp_2] = self._node_num
        return self._node_num
def insertNode(self):
    """
    Insert a mid-edge node on each of the six edges of every tetrahedral
    (C3D4) element, upgrading the connectivity to quadratic (C3D10).
    """
    # C3D10 convention: nodes 5-10 are the mid-points of the corner pairs
    # (1,2), (2,3), (1,3), (1,4), (2,4), (3,4), in that exact order.
    edge_pairs = [(0, 1), (1, 2), (0, 2), (0, 3), (1, 3), (2, 3)]
    for index, elem_def_string in enumerate(self._elem[1:]):
        corner_indices = [int(ind) for ind in elem_def_string.split(',')[1:]]
        mid_point_fields = [str(self._computeMidPoint(corner_indices[a], corner_indices[b]))
                            for a, b in edge_pairs]
        # Re-join the raw comma-split fields (preserving their original
        # spacing) with the six new mid-point indices appended.
        new_def_fields = elem_def_string.split(',') + mid_point_fields
        self._elem[index+1] = ', '.join(new_def_fields)
def _triangleNodesCollection(self):
"""
Collect all the nodes on each triangle (surface).
Need to be implemented after "self.insertNode()".
"""
for i in range(self._surface_mat.shape[0]):
tri_temp = self._surface_mat[i,:]
# Assuming all triangles on the surface of geometry.
middle_pts_list_temp = [self._computeMidPoint(tri_temp[0], tri_temp[1]),
self._computeMidPoint(tri_temp[0], tri_temp[2]),
self._computeMidPoint(tri_temp[1], tri_temp[2])]
triangle_nodes_list_temp = list(copy.deepcopy(tri_temp)) + copy.deepcopy(middle_pts_list_temp)
self._triangle_nodes_list.append(copy.deepcopy(triangle_nodes_list_temp)) # List of lists of ints.
def nonlinearization(self):
    """
    Nonlinearize the linear tetrahedral (CST/C3D4) elements into quadratic
    tetrahedral (C3D10) elements by inserting mid-edge nodes, and refresh
    the element/node counters.
    """
    # "- 1" excludes row 0 of the lists, which appears to hold the section
    # header line ("*Element" / "*Node") — TODO confirm against __init__.
    self._elem_num = len(self._elem) - 1
    self._orig_node_num = len(self._node) - 1  # Node count before mid-edge insertion.
    self.insertNode()
    self._triangleNodesCollection()
    self._node_num = len(self._node) - 1  # Node count after mid-edge insertion.
def saveLog(file_name_list, elapsed_time_list, write_status, data_file_name,
            sample_num, fix_indices_list, loads_num, load_sampling_type, load_param_tuple,
            material_type, modulus, poisson_ratio, isCoupleOn, isLaplacianSmoothingOn,
            coupling_type="", coupling_neighbor_layer_num=1,
            laplacian_iter_num=5, laplacian_smoothing_rate=1e-4, write_path="nonlinear_case_generation.log"):
    """
    Save the nonlinear case-generation results into a .log file.

    Parameters:
    ----------
    file_name_list: List of strings.
        Names of the generated input files.
    elapsed_time_list: List of floats.
        Per-file generation time, in the same order as file_name_list.
    write_status: String.
        "Normal" (all definitions) / "Fast" (nodes and elements only).
    data_file_name: String.
        Name of the modeling data file (.mat).
    sample_num: Int.
        Number of generated input files.
    fix_indices_list: List of ints.
        Indices of fixed nodes (1-based).
    loads_num: Int.
        Number of concentrated forces.
    load_sampling_type: String.
        "uniform" (min, max) / "gaussian" (mean, dev); anything else is
        logged as "uniform".
    load_param_tuple: Tuple of floats.
        Load sampling parameters, interpreted per load_sampling_type.
    material_type: String.
        "linear" / "neo_hookean_solid" / "neo_hookean_fitting".
    modulus: Float.
        Elastic modulus of the material (Pa).
    poisson_ratio: Float.
        Poisson's ratio of the material.
    isCoupleOn: Boolean.
        Whether coupling constraints were used for local force distribution.
    isLaplacianSmoothingOn: Boolean.
        Whether Laplacian smoothing of the force field was used.
    coupling_type (optional): String.
        Type of the coupling constraint. Default: "".
    coupling_neighbor_layer_num (optional): Int.
        Neighbor layers over which the local force distributes. Default: 1.
    laplacian_iter_num (optional): Int.
        Number of Laplacian smoothing iterations. Default: 5.
    laplacian_smoothing_rate (optional): Float.
        Rate of Laplacian smoothing. Default: 1e-4.
    write_path (optional): String.
        Path of the log file to write. Default: "nonlinear_case_generation.log".
    """
    isCoupleOn_status = "On" if isCoupleOn else "Off"
    isLaplacianSmoothingOn_status = "On" if isLaplacianSmoothingOn else "Off"
    if load_sampling_type not in ("uniform", "gaussian"):
        load_sampling_type = "uniform"  # Unrecognized types are logged as uniform.
    content = [f"Data_file_name: {data_file_name}",
               f"Sample_num = {sample_num}",
               f"Fixed_indices_list (indexed from 1): {fix_indices_list}",
               f"Material type: {material_type}",
               f"Elastic modulus = {modulus} Pa",
               f"Poisson's ratio = {poisson_ratio}",
               f"Loads_num = {loads_num}",
               f"Load sampling type: {load_sampling_type}"]
    if load_sampling_type == "gaussian":
        content.append(f"Load sampling parameters (mean, dev): {load_param_tuple} N")
    else:
        content.append(f"Load sampling range (min, max): {load_param_tuple} N")
    content.append(f"Coupling constraint status: {isCoupleOn_status}")
    content.append(f"Laplacian smoothing status: {isLaplacianSmoothingOn_status}")
    if isCoupleOn:
        content.append(f"Coupling type: {coupling_type}")
        content.append(f"Coupling neighbor layer numbers: {coupling_neighbor_layer_num}")
    if isLaplacianSmoothingOn:
        content.append(f"Laplacian smoothing iteration numbers = {laplacian_iter_num}")
        content.append(f"Laplacian smoothing rate = {laplacian_smoothing_rate}")
    divider = "----------------------------------------------------------"
    content.append(divider)
    content.append("Input file\t\tExport status\tGeneration status\tElapsed time/s")
    elapsed_time_total = 0
    for i, file_name in enumerate(file_name_list):
        elapsed_time = elapsed_time_list[i]
        content.append(f"{file_name}\t\t{write_status}\t\tCompleted\t\t" + "%.8f" % elapsed_time)
        elapsed_time_total += elapsed_time
    content.append(divider)
    content.append(f"Total elapsed time: {elapsed_time_total} s")
    with open(write_path, 'w') as f:
        f.write('\n'.join(content))
def main():
    """Generate a batch of Abaqus nonlinear-solver input files (.inp).

    Depending on ``isPrescribedForceOn``, either samples fresh load cases or
    re-uses prescribed force fields loaded from ``force_field_mat_name``.
    Writes one .inp per sample, a text log, and a .mat transfer file with the
    parameters needed by the downstream training pipeline.
    """
    # NOTE(review): unused in live code below; referenced only by the
    # commented-out np.save/np.savez calls at the end of this function.
    abaqus_default_directory = "C:/temp" # Default working directory of Abaqus.
    inp_folder = "inp_files"
    sample_nums = 1500
    data_file_path = "data_aorta.mat"
    node_variable_name, elem_variable_name = "NodeI", "EleI"
    results_folder_path_stress, results_folder_path_coor = "stress", "coor"
    material_type = "neo_hookean_solid" # "linear" / "neo_hookean_fitting" / "neo_hookean_solid".
    fix_indices_list = [1148, 1156, 1169] # Specify the node to fix. At least 3. Indexed from 1.
    write_status = "Normal" # String. "Normal" / "Fast". "Normal": generate all definitions; "Fast": generate nodes and elements definition only.
    # ================================== Force interpolation related variables ================================== #
    force_field_mat_name = "force_field_data.mat"
    force_interpolation_folder = "inp_interpolation"
    isPrescribedForceOn = True # Boolean indicator. True: use prescribed force field; False: no specified force field. Default: False.
    force_type = "random" # String. The type of prescribed force field. "interpolated": interpolated force fields; "random": weighted-summed force fields.
    eigen_num_force, force_scalar = 20, 0.4 # Float. The scalar of force fields controlling the force magnitude -> deformation magnitude of the tumor in nonlinear solver. Unit: N.
    # =========================================================================================================== #
    if isPrescribedForceOn:
        """
        The pipeline of generating interpolated force fields:
            1. Run "nonlinearCasesCreation.py" with 'isPrescribedForceOn = False' firstly.
            2. Run "forceInterpolation.py" in the same directory.
            3. Set 'isPrescribedForceOn = True', set 'force_type = "interpolated", then run "nonlinearCasesCreation.py" again.
               Get input files with "*_interpolated.inp" in the folder 'force_interpolation_folder'.
            4. Set 'isPrescribedForceOn = True', set 'force_type = "random", then run "nonlinearCasesCreation.py" again.
               Get input files with "*_random.inp" in the folder 'force_interpolation_folder'.
        """
        force_fields = (scipy.io.loadmat(force_field_mat_name)["force_field_interpolated"] if force_type == "interpolated" else
                        scipy.io.loadmat(force_field_mat_name)["force_field_random"]) # Size: nSurfI*3 x sampleNum. Concatenated as xyzxyz...
        # Sample count is overridden by the number of columns in the prescribed field.
        sample_nums = force_fields.shape[1]
    # Generate input file for Abaqus.
    # NOTE(review): the code after this loop assumes sample_nums >= 1
    # (inputFile_temp is referenced post-loop).
    file_name_list, elapsed_time_list, force_field_matrix = [], [], None
    for i in range(sample_nums):
        start_time = time.time()
        if isPrescribedForceOn:
            if not os.path.isdir(force_interpolation_folder): os.mkdir(force_interpolation_folder)
            # File names start at 20001 to keep them distinct from other batches.
            file_name_temp = ("{}_interpolated.inp".format(str(i+20001)) if force_type == "interpolated" else
                              "{}_random.inp".format(str(i+20001)))
            write_path = os.path.join(force_interpolation_folder, file_name_temp)
            force_field_prescribed_list = list(force_fields[:,i])
            inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
                                                fix_indices_list, node_variable_name, elem_variable_name,
                                                user_prescribed_force_field=force_field_prescribed_list)
        else:
            if not os.path.isdir(inp_folder): os.mkdir(inp_folder)
            file_name_temp = "{}.inp".format(str(i+20001))
            write_path = os.path.join(inp_folder, file_name_temp)
            inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
                                                fix_indices_list, node_variable_name, elem_variable_name)
        inputFile_temp.writeFile(write_status)
        end_time = time.time()
        elapsed_time = end_time - start_time
        file_name_list.append(file_name_temp)
        elapsed_time_list.append(elapsed_time)
        # Accumulate each sample's laplacian force field as a column. Size: nSurfI*3 x sampleNum.
        if i == 0: force_field_matrix = inputFile_temp._laplacian_force_field.reshape(-1,1)
        else: force_field_matrix = np.hstack((force_field_matrix, inputFile_temp._laplacian_force_field.reshape(-1,1)))
        # ============================ For force visualization only (sample_nums = 1) ============================ #
        # print(inputFile_temp._laplacian_initial_loads_posi)
        # force_field = {"force_field": inputFile_temp._laplacian_force_field}
        # scipy.io.savemat("force_field.mat", force_field)
        # ======================================================================================================== #
        print("Input_file: ", file_name_temp, "| Status:", write_status, "| Generation: Completed | Time: %.4f s" % (elapsed_time))
    # Write the generation log; parameters are pulled from the last generated input file.
    saveLog(file_name_list, elapsed_time_list, write_status, data_file_path, sample_nums,
            fix_indices_list, inputFile_temp.loads_num, inputFile_temp._load_sampling_style, inputFile_temp._load_params_tuple,
            material_type, inputFile_temp._modulus, inputFile_temp._poisson_ratio,
            inputFile_temp._isCoupleOn, inputFile_temp._isLaplacianSmoothingOn,
            coupling_type=inputFile_temp._coupling_type, coupling_neighbor_layer_num=inputFile_temp._coupling_neighbor_layers,
            laplacian_iter_num=inputFile_temp._laplacian_iter_num, laplacian_smoothing_rate=inputFile_temp._smoothing_rate,
            write_path="nonlinear_case_generation.log")
    # Weights for force-field reconstruction: freshly sampled in U(-1, 1) when no
    # prescribed field is used, otherwise re-loaded from the .mat file.
    if not isPrescribedForceOn: weight_matrix = (2.0 * np.random.rand(eigen_num_force, 3*sample_nums) - 1.0) # Distinct random weights corresponding to each laplacian-force-field.
    else: weight_matrix = scipy.io.loadmat(force_field_mat_name)["weight_matrix"] # Distinct random force field for each laplacian-force-field.
    # Parameter-transfer dictionary consumed by the training stage.
    mdict = {"fix_indices_list": fix_indices_list,
             "orig_data_file_name": data_file_path,
             "orig_config_var_name": node_variable_name,
             "inp_folder": inp_folder if not isPrescribedForceOn else force_interpolation_folder, # The folder containing input files.
             "current_directory": os.getcwd(),
             "results_folder_path_stress": results_folder_path_stress,
             "results_folder_path_coor": results_folder_path_coor,
             "original_node_number": inputFile_temp._orig_node_num,
             "couple_region_num": inputFile_temp._couple_region_num,
             "force_field_matrix": force_field_matrix, # The force field matrix of all generated samples. Size: nSurfI*3 x sampleNum_total.
             "weight_matrix": weight_matrix, "force_scalar_coeff": force_scalar, # The randomly generated matrix for force fields' reconstruction. Size: eigen_num x (3*sample_num).
             "eigen_number_force": eigen_num_force, # Int. The eigenmode number of force field reconstruction. (Used only in force field interpolation)
             "alpha_indexing_vector": np.zeros(shape=(sample_nums, 1)) if not isPrescribedForceOn else scipy.io.loadmat(force_field_mat_name)["alpha_indexing_vector"]
             }
    scipy.io.savemat("training_parameters_transfer.mat", mdict)
    # np.save(os.path.join(abaqus_default_directory, "training_parameters_transfer.npy"), mdict, fix_imports=True)
    # np.savez(os.path.join(abaqus_default_directory, "training_parameters_transfer.npz"),
    #          fix_indices_list=fix_indices_list,
    #          orig_data_file_name=data_file_path,
    #          orig_config_var_name=node_variable_name,
    #          inp_folder=inp_folder,
    #          current_directory=os.getcwd(),
    #          results_folder_path_stress=results_folder_path_stress,
    #          results_folder_path_coor=results_folder_path_coor)
if __name__ == "__main__":
    main()
| 46.594807
| 238
| 0.590473
| 6,689
| 59,222
| 4.923456
| 0.095829
| 0.017369
| 0.01166
| 0.009717
| 0.390338
| 0.294507
| 0.233079
| 0.197796
| 0.175812
| 0.156196
| 0
| 0.010634
| 0.30926
| 59,222
| 1,270
| 239
| 46.631496
| 0.794436
| 0.322228
| 0
| 0.121442
| 0
| 0
| 0.075736
| 0.009897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051233
| false
| 0
| 0.011385
| 0
| 0.096774
| 0.001898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
912ed2b516655605fdb89fa39bcc4f1ec0c3ed2a
| 2,306
|
py
|
Python
|
Packs/HealthCheck/Scripts/HealthCheckIncidentsCreatedMonthly/HealthCheckIncidentsCreatedMonthly.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | 2
|
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/HealthCheck/Scripts/HealthCheckIncidentsCreatedMonthly/HealthCheckIncidentsCreatedMonthly.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | 87
|
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/HealthCheck/Scripts/HealthCheckIncidentsCreatedMonthly/HealthCheckIncidentsCreatedMonthly.py
|
henry-sue-pa/content
|
043c6badfb4f9c80673cad9242fdea72efe301f7
|
[
"MIT"
] | 2
|
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
import demistomock as demisto  # noqa: F401
from CommonServerPython import *  # noqa: F401

# Widget script: render the number of incidents created per month (last 12 months).
# If a cached result exists in the incident context ("widgets" key), reuse it;
# otherwise query the statistics API and pick a chart type by server build number.
ctx = demisto.context()
dataFromCtx = ctx.get("widgets")

if not dataFromCtx:
    # No cached widget data: query the platform statistics endpoint directly.
    incident = demisto.incidents()[0]
    accountName = incident.get('account')
    # Multi-tenant deployments prefix API routes with "acc_<account>".
    accountName = f"acc_{accountName}" if accountName != "" else ""

    # Monthly incident counts for the last 12 months, grouped by occurrence month.
    stats = demisto.executeCommand(
        "demisto-api-post",
        {
            "uri": f"{accountName}/statistics/widgets/query",
            "body": {
                "size": 13,
                "dataType": "incidents",
                "query": "",
                "dateRange": {
                    "period": {
                        "byFrom": "months",
                        "fromValue": 12
                    }
                },
                "widgetType": "line",
                "params": {
                    "groupBy": [
                        "occurred(m)",
                        "null"
                    ],
                    "timeFrame": "months"
                },
            },
        })

    res = stats[0]["Contents"]["response"]

    buildNumber = demisto.executeCommand("DemistoVersion", {})[0]['Contents']['DemistoVersion']['buildNumber']
    # Dev/CI builds report a placeholder build number; substitute a fixed value
    # so the integer comparison below still works.
    buildNumber = f'{buildNumber}' if buildNumber != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"

    # Builds >= 618657 support the line-chart widget payload; older builds fall
    # back to a horizontal bar chart.
    if int(buildNumber) >= 618657:
        # Line graph:
        data = {
            "Type": 17,  # Type 17 = widget entry.
            "ContentsFormat": "line",
            "Contents": {
                "stats": res,
                "params": {
                    "timeFrame": "months"
                }
            }
        }

    else:
        # Bar graph:
        output = []
        for entry in res:
            output.append({"name": entry["name"], "data": entry["data"]})

        data = {
            "Type": 17,
            "ContentsFormat": "bar",
            "Contents": {
                "stats": output,
                "params": {
                    "layout": "horizontal"
                }
            }
        }

    demisto.results(data)
else:
    # Cached path: render the pre-computed monthly stats from context as-is.
    data = {
        "Type": 17,
        "ContentsFormat": "line",
        "Contents": {
            "stats": dataFromCtx['IncidentsCreatedMonthly'],
            "params": {
                "timeFrame": "months"
            }
        }
    }
    demisto.results(data)
| 27.452381
| 110
| 0.423677
| 158
| 2,306
| 6.14557
| 0.493671
| 0.046344
| 0.030896
| 0.07415
| 0.084449
| 0.084449
| 0.084449
| 0
| 0
| 0
| 0
| 0.02392
| 0.437988
| 2,306
| 83
| 111
| 27.783133
| 0.725309
| 0.019081
| 0
| 0.30137
| 0
| 0
| 0.234825
| 0.040319
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027397
| 0
| 0.027397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
912f4cb2d5b6031823d833fa3533c0b3fca9c0fd
| 13,099
|
py
|
Python
|
Bert_training.py
|
qzlydao/Bert_Sentiment_Analysis
|
2da2d0c6da2cdb55f37ff0a7e95f0ea4876b2d61
|
[
"Apache-2.0"
] | null | null | null |
Bert_training.py
|
qzlydao/Bert_Sentiment_Analysis
|
2da2d0c6da2cdb55f37ff0a7e95f0ea4876b2d61
|
[
"Apache-2.0"
] | null | null | null |
Bert_training.py
|
qzlydao/Bert_Sentiment_Analysis
|
2da2d0c6da2cdb55f37ff0a7e95f0ea4876b2d61
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
# Hyper-parameters and corpus paths for BERT pre-training.
config = {
    'train_corpus_path': './corpus/train_wiki.txt',
    'test_corpus_path': './corpus/test_wiki.txt',
    'word2idx_path': './corpus/bert_word2idx_extend.json',
    'output_path': './output_wiki_bert',
    'batch_size': 1,
    'max_seq_len': 200,
    'vocab_size': 32162,
    'lr': 2e-6,
    'num_workers': 0,
}
class Pretrainer:
    """BERT pre-training driver.

    Jointly optimises the masked-language-model (MLM) and next-sentence-
    prediction (NSP) objectives, and logs per-epoch metrics to a pickled
    pandas DataFrame.

    :param bert_model: the model *class* (not an instance); instantiated here
        with a ``BertConfig`` built from the module-level ``config``.
    :param vocab_size: vocabulary size. The first 20 token ids are reserved
        for special tokens (padding etc.), so the effective word count is
        ``vocab_size - 20``.
    :param max_seq_len: maximum length of a single sequence.
    :param batch_size: mini-batch size (used for the test dataloader).
    :param lr: Adam learning rate.
    :param with_cuda: run on GPU when one is available.
    """

    def __init__(self, bert_model,
                 vocab_size, max_seq_len,
                 batch_size, lr, with_cuda=True):
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.lr = lr
        # Pick the device: GPU only when requested AND available.
        cuda_condition = torch.cuda.is_available() and with_cuda
        self.device = torch.device('cuda:0' if cuda_condition else 'cpu')
        # Maximum length of a single sequence.
        self.max_seq_len = max_seq_len
        # Build the model from the hyper-parameter config and move it to the device.
        bertconfig = BertConfig(vocab_size=config['vocab_size'])
        self.bert_model = bert_model(config=bertconfig)
        self.bert_model.to(self.device)
        # Training dataset / dataloader (streamed from disk: on_memory=False).
        train_dataset = BERTDataset(corpus_path=config['train_corpus_path'],
                                    word2idx_path=config['word2idx_path'],
                                    seq_len=self.max_seq_len,
                                    hidden_dim=bertconfig.hidden_size,
                                    on_memory=False)
        # collate_fn is the identity: batches are padded later in self.padding().
        self.train_dataloader = DataLoader(train_dataset,
                                           batch_size=config['batch_size'],
                                           num_workers=config['num_workers'],
                                           collate_fn=lambda x: x)
        # Test dataset / dataloader (fully loaded in memory).
        test_dataset = BERTDataset(corpus_path=config['test_corpus_path'],
                                   word2idx_path=config['word2idx_path'],
                                   seq_len=self.max_seq_len,
                                   hidden_dim=bertconfig.hidden_size,
                                   on_memory=True)
        self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size,
                                          num_workers=config['num_workers'],
                                          collate_fn=lambda x: x)
        # Sinusoidal positional encoding, shape [max_seq_len, hidden_size] ...
        self.positional_enc = self.init_positional_encoding(hidden_dim=bertconfig.hidden_size,
                                                            max_seq_len=self.max_seq_len)
        # ... expanded to [1, max_seq_len, hidden_size] for broadcasting over the batch.
        self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)
        # Optimise all model parameters with Adam.
        optim_parameters = list(self.bert_model.parameters())
        self.optimizer = torch.optim.Adam(optim_parameters, lr=self.lr)
        print('Total Parameters:', sum(p.nelement() for p in self.bert_model.parameters()))

    def init_positional_encoding(self, hidden_dim, max_seq_len):
        """Build the sinusoidal positional-encoding table.

        :return: FloatTensor of shape [max_seq_len, hidden_dim]. Row 0 (the
            padding position) is all zeros; every other row is L2-normalised.
        """
        position_enc = np.array([
            [pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
            if pos != 0 else np.zeros(hidden_dim) for pos in range(max_seq_len)
        ])
        # dim=2i: sine.
        position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
        # dim=2i+1: cosine. BUGFIX: this previously applied np.sin here as well,
        # contradicting both the "dim=2i+1" intent and the standard transformer
        # sinusoidal encoding (sin for even dims, cos for odd dims).
        position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
        # Normalise each row by its L2 norm; the epsilon avoids division by
        # zero for the all-zero padding row.
        denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True))
        position_enc /= (denominator + 1e-8)
        position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
        return position_enc

    def test(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
        """Run one evaluation pass (no gradients) and return the summed test loss."""
        self.bert_model.eval()
        with torch.no_grad():
            return self.iteration(epoch, self.test_dataloader, train=False, df_path=df_path)

    def load_model(self, model, dir_path='./output'):
        """Load the most recent checkpoint from *dir_path* into *model* (non-strict)."""
        checkpoint_dir = self.find_most_recent_state_dict(dir_path)
        checkpoint = torch.load(checkpoint_dir)
        # Checkpoints are saved by save_state_dict() under the 'model_state_dict' key.
        model.load_state_dict(checkpoint['model_state_dict'], strict=False)
        torch.cuda.empty_cache()
        model.to(self.device)
        print('{} loaded for training!'.format(checkpoint_dir))

    def train(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
        """Run one training epoch."""
        self.bert_model.train()
        self.iteration(epoch, self.train_dataloader, train=True, df_path=df_path)

    def compute_loss(self, preditions, labels, num_class=2, ignore_index=None):
        """Cross-entropy loss over flattened predictions.

        NOTE: the first parameter keeps its original (misspelled) public name
        to stay backward-compatible with keyword callers.
        """
        if ignore_index is None:
            loss_func = CrossEntropyLoss()
        else:
            # e.g. ignore_index=0 skips padding positions in the MLM loss.
            loss_func = CrossEntropyLoss(ignore_index=ignore_index)
        return loss_func(preditions.view(-1, num_class), labels.view(-1))

    def get_mlm_accuracy(self, predictions, labels):
        """MLM accuracy over masked positions only.

        :param predictions: [batch_size, seq_len, vocab_size] logits.
        :param labels: [batch_size, seq_len]; positions with label <= 0 are ignored.
        """
        predictions = torch.argmax(predictions, dim=-1, keepdim=False)  # [batch_size, seq_len]
        # Only positions that were actually MASKed (label > 0) count.
        mask = (labels > 0)
        pred_correct = torch.sum((predictions == labels) * mask).float()
        # Epsilon keeps the ratio finite when no position is masked.
        mlm_accuracy = pred_correct / (torch.sum(mask).float() + 1e-8)
        return mlm_accuracy.item()

    def padding(self, output_dic_list):
        """Pad a list of per-sample dicts into batched tensors.

        Each element of *output_dic_list* carries 'bert_input', 'bert_label',
        'segment_label' (variable-length 1-D tensors) and 'is_next' (scalar).
        """
        bert_input = [i['bert_input'] for i in output_dic_list]
        bert_label = [i['bert_label'] for i in output_dic_list]
        segment_label = [i['segment_label'] for i in output_dic_list]
        # Right-pad to the longest sequence in the batch -> [batch_size, seq_len].
        bert_input = torch.nn.utils.rnn.pad_sequence(bert_input, batch_first=True)
        bert_label = torch.nn.utils.rnn.pad_sequence(bert_label, batch_first=True)
        segment_label = torch.nn.utils.rnn.pad_sequence(segment_label, batch_first=True)
        # [batch_size]
        is_next = torch.cat([i['is_next'] for i in output_dic_list])
        return {
            'bert_input': bert_input,
            'bert_label': bert_label,
            'segment_label': segment_label,
            'is_next': is_next
        }

    def find_most_recent_state_dict(self, dir_path):
        """Return the path of the newest checkpoint in *dir_path*.

        Checkpoint files are named '...model....<epoch>' (see save_state_dict),
        so sorting by the integer suffix yields chronological order.

        :raises FileNotFoundError: when the directory contains no files.
        """
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        dic_list = [i for i in os.listdir(dir_path)]
        if len(dic_list) == 0:
            raise FileNotFoundError('can not find any state dict in {}'.format(dir_path))
        dic_list = [i for i in dic_list if 'model' in i]
        dic_list = sorted(dic_list, key=lambda k: int(k.split('.')[-1]))
        return dir_path + '/' + dic_list[-1]

    def iteration(self, epoch, data_loader, train=True, df_path='./output_wiki_bert/df_log.pickle'):
        """Run one full pass over *data_loader* and log metrics to *df_path*.

        :return: (test mode only) summed test NSP + MLM loss for early stopping.
        """
        if not os.path.isfile(df_path) and epoch != 0:
            raise RuntimeError("log DataFrame path not found and can't create a new one because we're not training from scratch!")
        if not os.path.isfile(df_path) and epoch == 0:
            df = pd.DataFrame(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',
                                       'train_next_sen_acc', 'train_mlm_acc',
                                       'test_next_sen_loss', 'test_mlm_loss',
                                       'test_next_sen_acc', 'test_mlm_acc'])
            df.to_pickle(df_path)
            print('log DataFrame created!')

        str_code = 'train' if train else 'test'
        # Progress bar wrapping the dataloader.
        data_iter = tqdm(enumerate(data_loader),
                         desc='EP_%s:%d' % (str_code, epoch),
                         total=len(data_loader),
                         bar_format='{l_bar}{r_bar}')

        total_next_sen_loss = 0
        total_mlm_loss = 0
        total_next_sen_acc = 0
        total_mlm_acc = 0
        total_element = 0
        for i, data in data_iter:
            data = self.padding(data)
            # 0. batch_data will be sent into the device
            data = {key: value.to(self.device) for key, value in data.items()}
            # Slice the positional encoding to the batch's actual sequence length.
            positional_enc = self.positional_enc[:, :data['bert_input'].size()[-1], :].to(self.device)

            # 1. forward the next_sentence_prediction and masked_lm_model
            #    mlm_preds: [batch_size, seq_len, vocab_size]
            #    next_sen_preds: [batch_size, 2]
            mlm_preds, next_sen_preds = self.bert_model.forward(input_ids=data['bert_input'],
                                                                positional_enc=positional_enc,
                                                                token_type_ids=data['segment_label'])

            mlm_acc = self.get_mlm_accuracy(mlm_preds, data['bert_label'])
            next_sen_acc = next_sen_preds.argmax(dim=-1, keepdim=False).eq(data['is_next']).sum().item()
            # ignore_index=0 excludes padding / unmasked positions from the MLM loss.
            mlm_loss = self.compute_loss(mlm_preds, data['bert_label'], self.vocab_size, ignore_index=0)
            next_sen_loss = self.compute_loss(next_sen_preds, data['is_next'])
            # 2. joint loss of the two pre-training tasks.
            loss = mlm_loss + next_sen_loss

            # 3. backward pass and parameter update (training only).
            if train:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            total_next_sen_loss += next_sen_loss.item()
            total_mlm_loss += mlm_loss.item()
            total_next_sen_acc += next_sen_acc
            total_element += data['is_next'].nelement()
            total_mlm_acc += mlm_acc

            if train:
                log_dict = {
                    'epoch': epoch,
                    'train_next_sen_loss': total_next_sen_loss / (i + 1),
                    'train_mlm_loss': total_mlm_loss / (i + 1),
                    'train_next_sen_acc': total_next_sen_acc / total_element,
                    'train_mlm_acc': total_mlm_acc / (i + 1),
                    'test_next_sen_loss': 0, 'test_mlm_loss': 0,
                    'test_next_sen_acc': 0, 'test_mlm_acc': 0
                }
            else:
                log_dict = {
                    'epoch': epoch,
                    'test_next_sen_loss': total_next_sen_loss / (i + 1),
                    'test_mlm_loss': total_mlm_loss / (i + 1),
                    'test_next_sen_acc': total_next_sen_acc / total_element,
                    'test_mlm_acc': total_mlm_acc / (i + 1),
                    'train_next_sen_loss': 0, 'train_mlm_loss': 0,
                    'train_next_sen_acc': 0, 'train_mlm_acc': 0
                }
            if i % 10 == 0:
                data_iter.write(str({k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}))

        if train:
            df = pd.read_pickle(df_path)
            # Append this epoch's log row. BUGFIX: DataFrame.append was
            # deprecated in pandas 1.4 and removed in 2.0; pd.concat is the
            # supported equivalent.
            df = pd.concat([df, pd.DataFrame([log_dict])], ignore_index=True)
            df.reset_index(inplace=True, drop=True)
            df.to_pickle(df_path)
        else:
            # Merge the test metrics into the existing row for this epoch.
            log_dict = {k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}
            df = pd.read_pickle(df_path)
            df.reset_index(inplace=True, drop=True)
            for k, v in log_dict.items():
                df.at[epoch, k] = v
            df.to_pickle(df_path)
            return float(log_dict['test_next_sen_loss']) + float(log_dict['test_mlm_loss'])

    def save_state_dict(self, model, epoch, dir_path='./output', file_path='bert.model'):
        """Save *model*'s state dict as '<dir_path>/<file_path>.epoch.<epoch>'.

        The model is moved to CPU for serialisation, then back to self.device.
        """
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        save_path = dir_path + '/' + file_path + '.epoch.{}'.format(str(epoch))
        model.to('cpu')
        torch.save({'model_state_dict': model.state_dict()}, save_path)
        print('{} saved!'.format(save_path))
        model.to(self.device)
if __name__ == '__main__':
    def init_trainer(dynamic_lr, load_model=False):
        # Build a Pretrainer from the module-level config; optionally resume
        # from the most recent checkpoint in config['output_path'].
        trainer = Pretrainer(BertForPreTraining,
                             vocab_size=config['vocab_size'],
                             max_seq_len=config['max_seq_len'],
                             batch_size=config['batch_size'],
                             lr=dynamic_lr,
                             with_cuda=True)
        if load_model:
            trainer.load_model(trainer.bert_model, dir_path=config['output_path'])
        return trainer

    start_epoch = 3
    train_epoches = 1
    trainer = init_trainer(config['lr'], load_model=True)
    # NOTE(review): all_loss, threshold, patient and best_f1 are never used below
    # (leftovers from an early-stopping loop, presumably).
    all_loss = []
    threshold = 0
    patient = 10
    best_f1 = 0
    dynamic_lr = config['lr']
    # TODO: why does start_epoch begin at 3? Presumably resuming from a
    # checkpoint saved after epoch 2 (load_model=True) — confirm.
    for epoch in range(start_epoch, start_epoch + train_epoches):
        print('train with learning rate {}'.format(str(dynamic_lr)))
        trainer.train(epoch)
        trainer.save_state_dict(trainer.bert_model, epoch, dir_path=config['output_path'],
                                file_path='bert.model')
        trainer.test(epoch)
| 41.062696
| 130
| 0.579128
| 1,641
| 13,099
| 4.310786
| 0.176722
| 0.029686
| 0.017812
| 0.012723
| 0.283291
| 0.192112
| 0.162567
| 0.119452
| 0.104891
| 0.086231
| 0
| 0.011248
| 0.314528
| 13,099
| 318
| 131
| 41.191824
| 0.77659
| 0.066875
| 0
| 0.157658
| 0
| 0.004505
| 0.114819
| 0.014383
| 0
| 0
| 0
| 0.003145
| 0
| 1
| 0.054054
| false
| 0
| 0.031532
| 0
| 0.126126
| 0.022523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913073679e4abf540c0706db4723633ae6619d7d
| 5,757
|
py
|
Python
|
python/triton/language/random.py
|
appliedml85/triton
|
8bedcce9befbbe95d8fe0a082718edc4050e2831
|
[
"MIT"
] | 1
|
2021-09-03T15:58:49.000Z
|
2021-09-03T15:58:49.000Z
|
python/triton/language/random.py
|
appliedml85/triton
|
8bedcce9befbbe95d8fe0a082718edc4050e2831
|
[
"MIT"
] | null | null | null |
python/triton/language/random.py
|
appliedml85/triton
|
8bedcce9befbbe95d8fe0a082718edc4050e2831
|
[
"MIT"
] | null | null | null |
import triton
import triton.language as tl
# Notes
# 1. triton doesn't support uint32, so we use int32 instead and benefit from the fact that two's complement operations are equivalent to uint operations.
# 2. multiply_low_high is currently inefficient.
# 3. Even though technically philox sampling outputs int, in many places we pretends they were actualy uints e.g. uint_to_uniform_float
@triton.jit
def PHILOX_KEY_A():
    # Philox key constant 0x9E3779B9 written as its signed-int32 equivalent,
    # since triton has no uint32 (see notes at the top of this file).
    # 0x9E3779B9
    return -1640531527
@triton.jit
def PHILOX_KEY_B():
    # Philox key constant 0xBB67AE85 as signed int32 (no uint32 in triton).
    # 0xBB67AE85
    return -1150833019
@triton.jit
def PHILOX_ROUND_A():
    # Philox round multiplier 0xD2511F53 as signed int32 (no uint32 in triton).
    # 0xD2511F53
    return -766435501
@triton.jit
def PHILOX_ROUND_B():
    # Philox round multiplier 0xCD9E8D57 as signed int32 (no uint32 in triton).
    # 0xCD9E8D57
    return -845247145
@triton.jit
def hacky_to_uint64(x):
    # Zero-extend an int32 holding uint32 bits into int64: shift the sign bit
    # out before widening, then re-attach the low bit.
    return ((x >> 1).to(tl.int64) << 1) + (x & 1).to(tl.int64)
@triton.jit
def multiply_low_high(a, b):
    # 32x32 -> 64-bit multiply, returned as (low 32 bits, high 32 bits).
    # Flagged as inefficient in the notes at the top of this file: the high
    # half goes through a full 64-bit multiply via hacky_to_uint64.
    return (
        a * b,
        ((hacky_to_uint64(a) * hacky_to_uint64(b)) >> 32).to(tl.int32)
    )
@triton.jit
def single_round(c0, c1, c2, c3, k0, k1):
    # One Philox-4x32 round: multiply two counter words, then mix the high
    # halves with the remaining counters and the keys via xor.
    A = PHILOX_ROUND_A()
    B = PHILOX_ROUND_B()
    lo0, hi0 = multiply_low_high(A, c0)
    lo1, hi1 = multiply_low_high(B, c2)

    return (
        hi1 ^ c1 ^ k0,
        lo1,
        hi0 ^ c3 ^ k1,
        lo0,
    )
@triton.jit
def raise_key(k0, k1):
    # Philox key schedule: bump each key word by its Weyl constant per round.
    return (
        k0 + PHILOX_KEY_A(),
        k1 + PHILOX_KEY_B(),
    )
@triton.jit
def philox_f(c0, c1, c2, c3, k0, k1):
    # Full Philox-4x32-10 generator: ten rounds, manually unrolled, with the
    # key raised between consecutive rounds.
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    k0, k1 = raise_key(k0, k1)
    c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
    return c0, c1, c2, c3
@triton.jit
def uint32_to_uniform_float(x):
    """
    Numerically stable function to convert a random integer into a random float uniformly sampled in [0, 1).
    This is originally designed from uint32, but it works with int32 too as long as the int32 uniformly
    covers all the possible values it can take.
    """
    # Keep 23 random mantissa bits and force the exponent to 127, which
    # bit-casts to a float in [1, 2); subtracting 1.0 maps it to [0, 1).
    mantissa = x & 0x7fffff
    exp = 127
    res = mantissa | (exp << 23)
    return res.to(tl.float32, bitcast=True) - 1.0
@triton.jit
def pair_uniform_to_normal(u1, u2):
    """Box-Muller transform: map two uniforms in (0, 1) to two standard normals."""
    # Clamp u1 away from 0 so log(u1) stays finite.
    u1 = tl.maximum(1.0e-7, u1)
    th = 6.283185307179586 * u2  # 2*pi * u2
    r = tl.sqrt(-2.0 * tl.log(u1))
    return r * tl.cos(th), r * tl.sin(th)
@triton.jit
def randint4x(seed, offset):
    """
    Given a :code:`seed` scalar and an :code:`offset` block, returns four
    blocks of random :code:`int32`.

    This is the maximally efficient entry point
    to Triton's Philox pseudo-random number generator.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # Philox counter is (offset, 0, 0, 0); key is (seed, 0).
    z = 0
    return philox_f(offset, z, z, z, seed, z)
@triton.jit
def randint(seed, offset):
    """
    Given a :code:`seed` scalar and an :code:`offset` block, returns a single
    block of random :code:`int32`.

    If you need multiple streams of random numbers,
    using `randint4x` is likely to be faster than calling `randint` 4 times.

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # Discards 3 of the 4 Philox outputs — see randint4x for the efficient path.
    ret, _, _, _ = randint4x(seed, offset)
    return ret
@triton.jit
def rand(seed, offset):
    """
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns a block of random :code:`float32` in :math:`U(0, 1)`

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    source = randint(seed, offset)
    return uint32_to_uniform_float(source)
@triton.jit
def randn(seed, offset):
    r"""
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns a block of random :code:`float32` in :math:`\mathcal{N}(0, 1)`

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # Two uniforms -> one normal via Box-Muller; the second normal is discarded.
    i1, i2, _, _ = randint4x(seed, offset)
    u1 = uint32_to_uniform_float(i1)
    u2 = uint32_to_uniform_float(i2)
    n1, _ = pair_uniform_to_normal(u1, u2)
    return n1
@triton.jit
def rand4x(seed, offsets):
    """
    Given a :code:`seed` scalar and an :code:`offsets` block,
    returns four blocks of random :code:`float32` in :math:`U(0, 1)`

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    i1, i2, i3, i4 = randint4x(seed, offsets)
    u1 = uint32_to_uniform_float(i1)
    u2 = uint32_to_uniform_float(i2)
    u3 = uint32_to_uniform_float(i3)
    u4 = uint32_to_uniform_float(i4)
    return u1, u2, u3, u4
@triton.jit
def randn4x(seed, offset):
    r"""
    Given a :code:`seed` scalar and an :code:`offset` block,
    returns four blocks of random :code:`float32` in :math:`\mathcal{N}(0, 1)`

    :param seed: The seed for generating random numbers.
    :param offsets: The offsets to generate random numbers for.
    """
    # Four uniforms -> four normals, pairwise via Box-Muller.
    u1, u2, u3, u4 = rand4x(seed, offset)
    n1, n2 = pair_uniform_to_normal(u1, u2)
    n3, n4 = pair_uniform_to_normal(u3, u4)
    return n1, n2, n3, n4
| 27.545455
| 153
| 0.639396
| 916
| 5,757
| 3.911572
| 0.220524
| 0.034608
| 0.038515
| 0.051354
| 0.500419
| 0.469718
| 0.448228
| 0.448228
| 0.433715
| 0.433715
| 0
| 0.089436
| 0.238666
| 5,757
| 208
| 154
| 27.677885
| 0.72804
| 0.394303
| 0
| 0.398148
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00245
| 0
| 0
| 1
| 0.157407
| false
| 0
| 0.018519
| 0.064815
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9130e2a60db5f7dd70d5dc6252d49d770a1edb17
| 6,567
|
py
|
Python
|
platypush/backend/joystick/linux/__init__.py
|
BlackLight/platypush
|
6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a
|
[
"MIT"
] | 228
|
2018-01-30T11:17:09.000Z
|
2022-03-24T11:22:26.000Z
|
platypush/backend/joystick/linux/__init__.py
|
BlackLight/platypush
|
6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a
|
[
"MIT"
] | 167
|
2017-12-11T19:35:38.000Z
|
2022-03-27T14:45:30.000Z
|
platypush/backend/joystick/linux/__init__.py
|
BlackLight/runbullet
|
8d26c8634d2677b4402f0a21b9ab8244b44640db
|
[
"MIT"
] | 16
|
2018-05-03T07:31:56.000Z
|
2021-12-05T19:27:37.000Z
|
import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
class JoystickLinuxBackend(Backend):
"""
This backend intercepts events from joystick devices through the native Linux API implementation.
It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the
`Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with
the devices.
Triggers:
* :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected.
* :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected.
* :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed.
* :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released.
* :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes.
"""
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'throttle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
def __init__(self, device: str = '/dev/input/js0', *args, **kwargs):
"""
:param device: Joystick device to monitor (default: ``/dev/input/js0``).
"""
super().__init__(*args, **kwargs)
self.device = device
self._axis_states = {}
self._button_states = {}
self._axis_map = []
self._button_map = []
def _init_joystick(self, dev: IO):
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(dev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(dev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(dev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(dev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self._axis_map.append(axis_name)
self._axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(dev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self._button_map.append(btn_name)
self._button_states[btn_name] = 0
self.bus.post(JoystickConnectedEvent(device=self.device, name=js_name, axes=self._axis_map,
buttons=self._button_map))
def run(self):
    """
    Monitor loop: repeatedly open the joystick device, publish its
    capabilities, then translate raw 8-byte ``struct js_event`` records
    (u32 time, s16 value, u8 type, u8 number) into bus events until the
    device disappears or the backend is stopped.
    """
    super().run()
    self.logger.info(f'Opening {self.device}...')

    while not self.should_stop():
        # Open and initialize the joystick device.
        jsdev = None
        try:
            jsdev = open(self.device, 'rb')
            self._init_joystick(jsdev)
        except Exception as e:
            # Fix: close a half-opened handle before retrying, so a failing
            # _init_joystick() does not leak a file descriptor per attempt.
            if jsdev is not None:
                jsdev.close()
            self.logger.debug(f'Joystick device on {self.device} not available: {e}')
            time.sleep(5)
            continue

        try:
            # Joystick event loop.
            while not self.should_stop():
                try:
                    evbuf = jsdev.read(8)
                    if not evbuf:
                        continue

                    _, value, evt_type, number = struct.unpack('IhBB', evbuf)
                    if evt_type & 0x80:  # JS_EVENT_INIT: synthetic initial-state record
                        continue

                    if evt_type & 0x01:  # JS_EVENT_BUTTON
                        button = self._button_map[number]
                        if button:
                            self._button_states[button] = value
                            evt_class = JoystickButtonPressedEvent if value else JoystickButtonReleasedEvent
                            # noinspection PyTypeChecker
                            self.bus.post(evt_class(device=self.device, button=button))

                    if evt_type & 0x02:  # JS_EVENT_AXIS
                        axis = self._axis_map[number]
                        if axis:
                            # Normalize the signed 16-bit raw value to [-1.0, 1.0].
                            fvalue = value / 32767.0
                            self._axis_states[axis] = fvalue
                            # noinspection PyTypeChecker
                            self.bus.post(JoystickAxisEvent(device=self.device, axis=axis, value=fvalue))
                except OSError as e:
                    self.logger.warning(f'Connection to {self.device} lost: {e}')
                    self.bus.post(JoystickDisconnectedEvent(device=self.device))
                    break
        finally:
            # Fix: the original never closed the device, leaking one file
            # descriptor on every disconnect/reconnect cycle and on stop.
            jsdev.close()
| 34.563158
| 115
| 0.536166
| 681
| 6,567
| 5.057269
| 0.433186
| 0.029036
| 0.036585
| 0.050523
| 0.117305
| 0.013357
| 0.013357
| 0
| 0
| 0
| 0
| 0.076202
| 0.350541
| 6,567
| 189
| 116
| 34.746032
| 0.731301
| 0.198873
| 0
| 0.057143
| 0
| 0
| 0.092263
| 0
| 0
| 0
| 0.069052
| 0
| 0
| 1
| 0.021429
| false
| 0
| 0.05
| 0
| 0.092857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91319c7d1f44146497a5047e81aae4b710f7a353
| 10,043
|
py
|
Python
|
src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py
|
SaxionMechatronics/Firmware
|
7393d5d7610dc8d2cb64d90a5359b6c561fb642a
|
[
"BSD-3-Clause"
] | 4,224
|
2015-01-02T11:51:02.000Z
|
2020-10-27T23:42:28.000Z
|
src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py
|
SaxionMechatronics/Firmware
|
7393d5d7610dc8d2cb64d90a5359b6c561fb642a
|
[
"BSD-3-Clause"
] | 11,736
|
2015-01-01T11:59:16.000Z
|
2020-10-28T17:13:38.000Z
|
src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py
|
SaxionMechatronics/Firmware
|
7393d5d7610dc8d2cb64d90a5359b6c561fb642a
|
[
"BSD-3-Clause"
] | 11,850
|
2015-01-02T14:54:47.000Z
|
2020-10-28T16:42:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: Tanja Baumann
Email: tanja@auterion.com
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g.battery_status/current_a
you will have to comment out the corresponding parts in the script
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
import sys  # used for sys.exit(); not reliably provided by `from pylab import *`
# --- Command-line arguments ---
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
# NOTE(review): nargs='?' with default=[] yields an empty *list* (not None)
# when the positional is omitted — presumably so ULog()/the type check fail
# loudly downstream; confirm this is intentional.
parser.add_argument('logfile', type=str, nargs='?', default=[],
                    help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
                    help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
                    help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance

# Load the log data (produced by pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
def get_data(topic_name, variable_name, index):
    """Return the samples of ``variable_name`` from the ``index``-th instance
    of ``topic_name`` in the global ``log``, or ``[]`` if the topic/field
    is not present in the log.
    """
    try:
        dataset = log.get_dataset(topic_name, index)
        return dataset.data[variable_name]
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. Missing topics/fields still fall back to [].
        return []
def ms2s_list(time_ms_list):
    """Scale a (numpy) timestamp array by 1e-6 to obtain seconds.

    Empty inputs are returned unchanged. NOTE(review): despite the name,
    the 1e-6 factor converts *microseconds* (ulog timestamps) to seconds.
    """
    return 1e-6 * time_ms_list if len(time_ms_list) > 0 else time_ms_list
# --- Select msgs and copy into arrays ---
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))

# Pick the power signal (commanded thrust or measured battery current).
# ``factor`` sets the sign applied to the printed coefficients and
# ``comp_type_param`` is the CAL_MAG_COMP_TYP parameter value to set.
if comp_type == "thrust":
    power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
    power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
    comp_type_param = 1
    factor = 1
    unit = "[G]"
elif comp_type == "current":
    power = get_data('battery_status', 'current_a', comp_instance)
    power = np.true_divide(power, 1000)  # kA
    power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
    comp_type_param = 2 + comp_instance
    factor = -1
    unit = "[G/kA]"
else:
    print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
    sys.exit(1)

if len(power) == 0:
    print("could not retrieve power signal from log, zero data points")
    sys.exit(1)

# Raw magnetometer samples for up to four sensor_mag instances; instances
# missing from the log come back as empty lists from get_data().
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)

mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)

mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)

mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)

# Collect only the instances that actually have data, so index ``idx``
# below always refers to a populated magnetometer.
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []

if len(mag0X_body) > 0:
    magX_body.append(mag0X_body)
    magY_body.append(mag0Y_body)
    magZ_body.append(mag0Z_body)
    t_mag.append(t_mag0)
    mag_id.append(mag0_ID[0])

if len(mag1X_body) > 0:
    magX_body.append(mag1X_body)
    magY_body.append(mag1Y_body)
    magZ_body.append(mag1Z_body)
    t_mag.append(t_mag1)
    mag_id.append(mag1_ID[0])

if len(mag2X_body) > 0:
    magX_body.append(mag2X_body)
    magY_body.append(mag2Y_body)
    magZ_body.append(mag2Z_body)
    t_mag.append(t_mag2)
    mag_id.append(mag2_ID[0])

if len(mag3X_body) > 0:
    magX_body.append(mag3X_body)
    magY_body.append(mag3Y_body)
    magZ_body.append(mag3Z_body)
    t_mag.append(t_mag3)
    mag_id.append(mag3_ID[0])

n_mag = len(magX_body)

# log index does not necessarily match mag calibration instance number:
# match each device_id against the CAL_MAGn_ID parameters instead.
calibration_instance = []
instance_found = False
for idx in range(n_mag):
    instance_found = False
    for j in range(4):
        if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
            calibration_instance.append(j)
            instance_found = True
    if not instance_found:
        print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))

# Get first arming sequence from data (arming_state 1 -> 2 = armed,
# 2 -> 1 = disarmed).
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
    if armed[i] == 1 and armed[i+1] == 2:
        start_time = t_armed[i+1]
    if armed[i] == 2 and armed[i+1] == 1:
        stop_time = t_armed[i+1]
        break

# Cut unarmed sequences from mag data.
index_start = 0
index_stop = 0
for idx in range(n_mag):
    for i in range(len(t_mag[idx])):
        if t_mag[idx][i] > start_time:
            index_start = i
            break
    for i in range(len(t_mag[idx])):
        if t_mag[idx][i] > stop_time:
            index_stop = i -1
            break
    t_mag[idx] = t_mag[idx][index_start:index_stop]
    magX_body[idx] = magX_body[idx][index_start:index_stop]
    magY_body[idx] = magY_body[idx][index_start:index_stop]
    magZ_body[idx] = magZ_body[idx][index_start:index_stop]

# Resample the power signal onto each magnetometer's timestamps.
power_resampled = []
for idx in range(n_mag):
    power_resampled.append(interp(t_mag[idx], power_t, power))

# Fit a line per axis; the slope (index 0) is the compensation coefficient.
px = []
py = []
pz = []
for idx in range(n_mag):
    px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1,full = True)
    py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1,full = True)
    pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full = True)
    px.append(px_temp)
    py.append(py_temp)
    pz.append(pz_temp)

# Print the `param set` commands to console.
for idx in range(n_mag):
    print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
    print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
    print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
    print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))

# Plot fit per magnetometer: samples vs power plus the fitted line.
for idx in range(n_mag):
    fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
    fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')

    plt.subplot(1,3,1)
    plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
    plt.xlabel('current [kA]')
    plt.ylabel('mag X [G]')

    plt.subplot(1,3,2)
    plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
    plt.xlabel('current [kA]')
    plt.ylabel('mag Y [G]')

    plt.subplot(1,3,3)
    plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
    plt.xlabel('current [kA]')
    plt.ylabel('mag Z [G]')

    # Display results.
    plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * px[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
    plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * py[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
    plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * pz[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')

# Compensation comparison plots: original signal vs. signal with the
# fitted slope times the power signal removed.
for idx in range(n_mag):
    fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
    fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')

    plt.subplot(3,1,1)
    original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
    power_x, = plt.plot(t_mag[idx],magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
    plt.legend(handles=[original_x, power_x])
    plt.xlabel('Time [s]')
    plt.ylabel('Mag X corrected[G]')

    plt.subplot(3,1,2)
    original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
    power_y, = plt.plot(t_mag[idx],magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
    plt.legend(handles=[original_y, power_y])
    plt.xlabel('Time [s]')
    plt.ylabel('Mag Y corrected[G]')

    plt.subplot(3,1,3)
    original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
    power_z, = plt.plot(t_mag[idx],magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
    plt.legend(handles=[original_z, power_z])
    plt.xlabel('Time [s]')
    plt.ylabel('Mag Z corrected[G]')

plt.show()
| 36.787546
| 279
| 0.685054
| 1,582
| 10,043
| 4.138432
| 0.183312
| 0.028868
| 0.039713
| 0.048877
| 0.502062
| 0.436689
| 0.319383
| 0.242859
| 0.242859
| 0.219032
| 0
| 0.02804
| 0.154834
| 10,043
| 272
| 280
| 36.922794
| 0.743285
| 0.082346
| 0
| 0.137056
| 0
| 0
| 0.180661
| 0.004786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010152
| false
| 0.005076
| 0.040609
| 0
| 0.071066
| 0.045685
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913206ffbcd62d973e6003afaac405c6a7ea1d3b
| 524
|
py
|
Python
|
portfolio_optimization/constants.py
|
AI-Traiding-Team/paired_trading
|
72d4dd0071314e2f0efaa26931ca7339199fc998
|
[
"MIT"
] | 1
|
2022-03-26T23:21:51.000Z
|
2022-03-26T23:21:51.000Z
|
portfolio_optimization/constants.py
|
AI-Traiding-Team/paired_trading
|
72d4dd0071314e2f0efaa26931ca7339199fc998
|
[
"MIT"
] | null | null | null |
portfolio_optimization/constants.py
|
AI-Traiding-Team/paired_trading
|
72d4dd0071314e2f0efaa26931ca7339199fc998
|
[
"MIT"
] | 3
|
2021-12-07T07:39:43.000Z
|
2022-01-24T05:05:55.000Z
|
import os

# Output directory layout for the portfolio-optimization results.
path1 = "outputs"
path2 = "outputs/_imgs"
path3 = "outputs/max_sharpe_weights"
path4 = "outputs/opt_portfolio_trades"

# Create the root output directory, reporting whether it already existed.
try:
    os.mkdir(path1)
except OSError:
    print("Директория %s уже создана" % path1)
else:
    print("Успешно создана директория %s " % path1)

# Create the subdirectories; the first failure aborts the rest, exactly as
# the original sequential makedirs calls did.
try:
    for _subdir in (path2, path3, path4):
        os.makedirs(_subdir)
except OSError:
    print("Директории уже созданы")
else:
    print("Успешно созданы нужные директории")

source_path = '../source_root/1m'
destination_path = 'outputs'
| 20.153846
| 52
| 0.704198
| 66
| 524
| 5.469697
| 0.515152
| 0.083102
| 0.099723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025463
| 0.175573
| 524
| 26
| 53
| 20.153846
| 0.810185
| 0
| 0
| 0.285714
| 0
| 0
| 0.39619
| 0.102857
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0.190476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91329b52b2eb8891b64c02d1b241dca7cd47466e
| 26,007
|
py
|
Python
|
mypy/transformtype.py
|
silky/mypy
|
de6a8d3710df9f49109cb682f2092e4967bfb92c
|
[
"PSF-2.0"
] | 1
|
2019-06-27T11:34:27.000Z
|
2019-06-27T11:34:27.000Z
|
mypy/transformtype.py
|
silky/mypy
|
de6a8d3710df9f49109cb682f2092e4967bfb92c
|
[
"PSF-2.0"
] | null | null | null |
mypy/transformtype.py
|
silky/mypy
|
de6a8d3710df9f49109cb682f2092e4967bfb92c
|
[
"PSF-2.0"
] | null | null | null |
"""Transform classes for runtime type checking."""
from typing import Undefined, List, Set, Any, cast, Tuple, Dict
from mypy.nodes import (
TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt,
TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt,
AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode
)
from mypy import nodes
from mypy.semanal import self_type
from mypy.types import (
Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar,
UnboundType
)
from mypy.checkmember import analyse_member_access
from mypy.checkexpr import type_object_type
from mypy.subtypes import map_instance_to_supertype
import mypy.transform
from mypy.transformfunc import FuncTransformer
from mypy.transutil import (
self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type
)
from mypy.rttypevars import translate_runtime_type_vars_locally
from mypy.compileslotmap import find_slot_origin
from mypy.coerce import coerce
from mypy.maptypevar import num_slots, get_tvar_access_path
from mypy import erasetype
class TypeTransformer:
"""Class for transforming type definitions for runtime type checking.
Transform a type definition by modifying it in-place.
The following transformations are performed:
* Represent generic type variables explicitly as attributes.
* Create generic wrapper classes used by coercions to different type
args.
* Create wrapper methods needed when overriding methods with different
signatures.
* Create wrapper methods for calling methods in dynamically typed code.
These perform the necessary coercions for arguments and return values
to/from 'Any'.
This is used by DyncheckTransformVisitor and is logically aggregated within
that class.
"""
# Used for common transformation operations.
tf = Undefined('mypy.transform.DyncheckTransformVisitor')
# Used for transforming methods.
func_tf = Undefined(FuncTransformer)
def __init__(self, tf: 'mypy.transform.DyncheckTransformVisitor') -> None:
self.tf = tf
self.func_tf = FuncTransformer(tf)
def transform_type_def(self, tdef: TypeDef) -> List[Node]:
"""Transform a type definition.
The result may be one or two definitions. The first is the
transformation of the original TypeDef. The second is a
wrapper type, which is generated for generic types only.
"""
defs = [] # type: List[Node]
if tdef.info.type_vars:
# This is a generic type. Insert type variable slots in
# the class definition for new type variables, i.e. type
# variables not mapped to superclass type variables.
defs.extend(self.make_tvar_representation(tdef.info))
# Iterate over definitions and transform each of them.
vars = set() # type: Set[Var]
for d in tdef.defs.body:
if isinstance(d, FuncDef):
# Implicit cast from FuncDef[] to Node[] is safe below.
defs.extend(Any(self.func_tf.transform_method(d)))
elif isinstance(d, VarDef):
defs.extend(self.transform_var_def(d))
for n in d.items:
vars.add(n)
elif isinstance(d, AssignmentStmt):
self.transform_assignment(d)
defs.append(d)
# Add accessors for implicitly defined attributes.
for node in tdef.info.names.values():
if isinstance(node.node, Var):
v = cast(Var, node.node)
if v.info == tdef.info and v not in vars:
defs.extend(self.make_accessors(v))
# For generic classes, add an implicit __init__ wrapper.
defs.extend(self.make_init_wrapper(tdef))
if tdef.is_generic() or (tdef.info.bases and
tdef.info.mro[1].is_generic()):
self.make_instance_tvar_initializer(
cast(FuncDef, tdef.info.get_method('__init__')))
if not defs:
defs.append(PassStmt())
if tdef.is_generic():
gen_wrapper = self.generic_class_wrapper(tdef)
tdef.defs = Block(defs)
dyn_wrapper = self.make_type_object_wrapper(tdef)
if not tdef.is_generic():
return [tdef, dyn_wrapper]
else:
return [tdef, dyn_wrapper, gen_wrapper]
def make_init_wrapper(self, tdef: TypeDef) -> List[Node]:
"""Make and return an implicit __init__ if class needs it.
Otherwise, return an empty list. We include an implicit
__init__ if the class is generic or if it extends a generic class
and if it does not define __init__.
The __init__ of a generic class requires one or more extra type
variable arguments. The inherited __init__ may not accept these.
For example, assume these definitions:
. class A(Generic[T]): pass
. class B(A[int]): pass
The constructor for B will be (equivalent to)
. def __init__(self: B) -> None:
. self.__tv = <int>
. super().__init__(<int>)
"""
# FIX overloading, default args / varargs, keyword args
info = tdef.info
if '__init__' not in info.names and (
tdef.is_generic() or (info.bases and
info.mro[1].is_generic())):
# Generic class with no explicit __init__ method
# (i.e. __init__ inherited from superclass). Generate a
# wrapper that initializes type variable slots and calls
# the superclass __init__ method.
base = info.mro[1]
selftype = self_type(info)
callee_type = cast(Callable, analyse_member_access(
'__init__', selftype, None, False, True, None, None,
base))
# Now the callee type may contain the type variables of a
# grandparent as bound type variables, but we want the
# type variables of the parent class. Explicitly set the
# bound type variables.
callee_type = self.fix_bound_init_tvars(callee_type,
map_instance_to_supertype(selftype, base))
super_init = cast(FuncDef, base.get_method('__init__'))
# Build argument list.
args = [Var('self')]
for i in range(1, len(super_init.args)):
args.append(Var(super_init.args[i].name()))
args[-1].type = callee_type.arg_types[i - 1]
selft = self_type(self.tf.type_context())
callee_type = prepend_arg_type(callee_type, selft)
creat = FuncDef('__init__', args,
super_init.arg_kinds, [None] * len(args),
Block([]))
creat.info = tdef.info
creat.type = callee_type
creat.is_implicit = False
tdef.info.names['__init__'] = SymbolTableNode(MDEF, creat,
typ=creat.type)
# Insert a call to superclass constructor. If the
# superclass is object, the constructor does nothing =>
# omit the call.
if base.fullname() != 'builtins.object':
creat.body.body.append(
self.make_superclass_constructor_call(tdef.info,
callee_type))
# Implicit cast from FuncDef[] to Node[] is safe below.
return Any(self.func_tf.transform_method(creat))
else:
return []
def fix_bound_init_tvars(self, callable: Callable,
typ: Instance) -> Callable:
"""Replace bound type vars of callable with args from instance type."""
a = [] # type: List[Tuple[int, Type]]
for i in range(len(typ.args)):
a.append((i + 1, typ.args[i]))
return Callable(callable.arg_types, callable.arg_kinds,
callable.arg_names, callable.ret_type,
callable.is_type_obj(), callable.name,
callable.variables, a)
def make_superclass_constructor_call(
self, info: TypeInfo, callee_type: Callable) -> ExpressionStmt:
"""Construct a statement that calls the superclass constructor.
In particular, it passes any type variables arguments as needed.
"""
callee = SuperExpr('__init__')
callee.info = info
# We do not handle generic constructors. Either pass runtime
# type variables from the current scope or perhaps require
# explicit constructor in this case.
selftype = self_type(info)
# FIX overloading
# FIX default args / varargs
# Map self type to the superclass context.
base = info.mro[1]
selftype = map_instance_to_supertype(selftype, base)
super_init = cast(FuncDef, base.get_method('__init__'))
# Add constructor arguments.
args = [] # type: List[Node]
for n in range(1, callee_type.min_args):
args.append(NameExpr(super_init.args[n].name()))
self.tf.set_type(args[-1], callee_type.arg_types[n])
# Store callee type after stripping away the 'self' type.
self.tf.set_type(callee, nodes.method_callable(callee_type))
call = CallExpr(callee, args, [nodes.ARG_POS] * len(args))
return ExpressionStmt(call)
def transform_var_def(self, o: VarDef) -> List[Node]:
"""Transform a member variable definition.
The result may be one or more definitions.
"""
res = [o] # type: List[Node]
self.tf.visit_var_def(o)
# Add $x and set$x accessor wrappers for data attributes. These let
# derived classes redefine a data attribute as a property.
for n in o.items:
res.extend(self.make_accessors(n))
return res
def transform_assignment(self, o: AssignmentStmt) -> None:
"""Transform an assignment statement in class body."""
self.tf.visit_assignment_stmt(o)
def make_accessors(self, n: Var) -> List[Node]:
if n.type:
t = n.type
else:
t = AnyType()
return [self.make_getter_wrapper(n.name(), t),
self.make_setter_wrapper(n.name(), t),
self.make_dynamic_getter_wrapper(n.name(), t),
self.make_dynamic_setter_wrapper(n.name(), t)]
def make_getter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a getter wrapper for a data attribute.
The getter will be of this form:
. def $name*(self: C) -> type:
. return self.name!
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
member_expr = MemberExpr(scope.name_expr('self'), name, direct=True)
ret = ReturnStmt(member_expr)
wrapper_name = '$' + name
sig = Callable([selft], [nodes.ARG_POS], [None], typ, False)
fdef = FuncDef(wrapper_name,
[selfv],
[nodes.ARG_POS],
[None],
Block([ret]), sig)
fdef.info = self.tf.type_context()
return fdef
def make_dynamic_getter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a dynamically-typed getter wrapper for a data attribute.
The getter will be of this form:
. def $name*(self: C) -> Any:
. return {Any <= typ self.name!}
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
member_expr = MemberExpr(scope.name_expr('self'), name, direct=True)
coerce_expr = coerce(member_expr, AnyType(), typ,
self.tf.type_context())
ret = ReturnStmt(coerce_expr)
wrapper_name = '$' + name + self.tf.dynamic_suffix()
sig = Callable([selft], [nodes.ARG_POS], [None], AnyType(), False)
return FuncDef(wrapper_name,
[selfv],
[nodes.ARG_POS],
[None],
Block([ret]), sig)
def make_setter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a setter wrapper for a data attribute.
The setter will be of this form:
. def set$name(self: C, name: typ) -> None:
. self.name! = name
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
namev = scope.add(name, typ)
lvalue = MemberExpr(scope.name_expr('self'), name, direct=True)
rvalue = scope.name_expr(name)
ret = AssignmentStmt([lvalue], rvalue)
wrapper_name = 'set$' + name
sig = Callable([selft, typ],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Void(), False)
fdef = FuncDef(wrapper_name,
[selfv, namev],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Block([ret]), sig)
fdef.info = self.tf.type_context()
return fdef
def make_dynamic_setter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a dynamically-typed setter wrapper for a data attribute.
The setter will be of this form:
. def set$name*(self: C, name; Any) -> None:
. self.name! = {typ name}
"""
lvalue = MemberExpr(self_expr(), name, direct=True)
name_expr = NameExpr(name)
rvalue = coerce(name_expr, typ, AnyType(), self.tf.type_context())
ret = AssignmentStmt([lvalue], rvalue)
wrapper_name = 'set$' + name + self.tf.dynamic_suffix()
selft = self_type(self.tf.type_context())
sig = Callable([selft, AnyType()],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Void(), False)
return FuncDef(wrapper_name,
[Var('self'), Var(name)],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Block([ret]), sig)
def generic_accessor_wrappers(self, s: AssignmentStmt) -> List[Node]:
"""Construct wrapper class methods for attribute accessors."""
res = [] # type: List[Node]
assert len(s.lvalues) == 1
assert isinstance(s.lvalues[0], NameExpr)
assert s.type is not None
name = cast(NameExpr, s.lvalues[0])
for fd in [self.make_getter_wrapper(name.name, s.type),
self.make_setter_wrapper(name.name, s.type)]:
res.extend(self.func_tf.generic_method_wrappers(fd))
return res
def generic_class_wrapper(self, tdef: TypeDef) -> TypeDef:
"""Construct a wrapper class for a generic type."""
# FIX semanal meta-info for nodes + TypeInfo
defs = [] # type: List[Node]
# Does the type have a superclass, other than builtins.object?
base = tdef.info.mro[1]
has_proper_superclass = base.fullname() != 'builtins.object'
if not has_proper_superclass or self.tf.is_java:
# Generate member variables for wrapper object.
defs.extend(self.make_generic_wrapper_member_vars(tdef))
for alt in [False, BOUND_VAR]:
defs.extend(self.make_tvar_representation(tdef.info, alt))
# Generate constructor.
defs.append(self.make_generic_wrapper_init(tdef.info))
# Generate method wrappers.
for d in tdef.defs.body:
if isinstance(d, FuncDef):
if not d.is_constructor():
defs.extend(self.func_tf.generic_method_wrappers(d))
elif isinstance(d, AssignmentStmt):
defs.extend(self.generic_accessor_wrappers(d))
elif not isinstance(d, PassStmt):
raise RuntimeError(
'Definition {} at line {} not supported'.format(
type(d), d.line))
base_type = self.tf.named_type('builtins.object') # type: Type
# Inherit superclass wrapper if there is one.
if has_proper_superclass:
base = self.find_generic_base_class(tdef.info)
if base:
# TODO bind the type somewhere
base_type = UnboundType(base.defn.name +
self.tf.wrapper_class_suffix())
# Build the type definition.
wrapper = TypeDef(tdef.name + self.tf.wrapper_class_suffix(),
Block(defs),
None,
[base_type])
# FIX fullname
self.tf.add_line_mapping(tdef, wrapper)
return wrapper
def find_generic_base_class(self, info: TypeInfo) -> TypeInfo:
base = info.mro[1]
while True:
if base.type_vars != []:
return base
if len(base.mro) <= 1:
return None
base = base.mro[1]
def make_generic_wrapper_member_vars(self, tdef: TypeDef) -> List[Node]:
"""Generate member variable definition for wrapped object (__o).
This is added to a generic wrapper class.
"""
# The type is 'Any' since it should behave covariantly in subclasses.
return [VarDef([Var(self.object_member_name(tdef.info),
AnyType())], False, None)]
def object_member_name(self, info: TypeInfo) -> str:
if self.tf.is_java:
return '__o_{}'.format(info.name)
else:
return '__o'
def make_generic_wrapper_init(self, info: TypeInfo) -> FuncDef:
"""Build constructor of a generic wrapper class."""
nslots = num_slots(info)
cdefs = [] # type: List[Node]
# Build superclass constructor call.
base = info.mro[1]
if base.fullname() != 'builtins.object' and self.tf.is_java:
s = SuperExpr('__init__')
cargs = [NameExpr('__o')] # type: List[Node]
for n in range(num_slots(base)):
cargs.append(NameExpr(tvar_arg_name(n + 1)))
for n in range(num_slots(base)):
cargs.append(NameExpr(tvar_arg_name(n + 1, BOUND_VAR)))
c = CallExpr(s, cargs, [nodes.ARG_POS] * len(cargs))
cdefs.append(ExpressionStmt(c))
# Create initialization of the wrapped object.
cdefs.append(AssignmentStmt([MemberExpr(
self_expr(),
self.object_member_name(info),
direct=True)],
NameExpr('__o')))
# Build constructor arguments.
args = [Var('self'), Var('__o')]
init = [None, None] # type: List[Node]
for alt in [False, BOUND_VAR]:
for n in range(nslots):
args.append(Var(tvar_arg_name(n + 1, alt)))
init.append(None)
nargs = nslots * 2 + 2
fdef = FuncDef('__init__',
args,
[nodes.ARG_POS] * nargs,
init,
Block(cdefs),
Callable( [AnyType()] * nargs,
[nodes.ARG_POS] * nargs, [None] * nargs,
Void(),
is_type_obj=False))
fdef.info = info
self.make_wrapper_slot_initializer(fdef)
return fdef
def make_tvar_representation(self, info: TypeInfo,
is_alt: Any = False) -> List[Node]:
"""Return type variable slot member definitions.
There are of form '__tv*: Any'. Only include new slots defined in the
type.
"""
defs = [] # type: List[Node]
base_slots = num_slots(info.mro[1])
for n in range(len(info.type_vars)):
# Only include a type variable if it introduces a new slot.
slot = get_tvar_access_path(info, n + 1)[0] - 1
if slot >= base_slots:
defs.append(VarDef([Var(tvar_slot_name(slot, is_alt),
AnyType())], False, None))
return defs
def make_instance_tvar_initializer(self, creat: FuncDef) -> None:
"""Add type variable member initialization code to a constructor.
Modify the constructor body directly.
"""
for n in range(num_slots(creat.info)):
rvalue = self.make_tvar_init_expression(creat.info, n)
init = AssignmentStmt([MemberExpr(self_expr(),
tvar_slot_name(n),
direct=True)],
rvalue)
self.tf.set_type(init.lvalues[0], AnyType())
self.tf.set_type(init.rvalue, AnyType())
creat.body.body.insert(n, init)
def make_wrapper_slot_initializer(self, creat: FuncDef) -> None:
"""Add type variable member initializations to a wrapper constructor.
The function must be a constructor of a generic wrapper class. Modify
the constructor body directly.
"""
for alt in [BOUND_VAR, False]:
for n in range(num_slots(creat.info)):
rvalue = TypeExpr(
RuntimeTypeVar(NameExpr(tvar_slot_name(n, alt))))
init = AssignmentStmt(
[MemberExpr(self_expr(),
tvar_slot_name(n, alt), direct=True)],
rvalue)
self.tf.set_type(init.lvalues[0], AnyType())
self.tf.set_type(init.rvalue, AnyType())
creat.body.body.insert(n, init)
def make_tvar_init_expression(self, info: TypeInfo, slot: int) -> TypeExpr:
"""Return the initializer for the given slot in the given type.
This is the type expression that initializes the given slot
using the type arguments given to the constructor.
Examples:
- In 'class C(Generic[T]) ...', the initializer for the slot 0 is
TypeExpr(RuntimeTypeVar(NameExpr('__tv'))).
- In 'class D(C[int]) ...', the initializer for the slot 0 is
TypeExpr(<int instance>).
"""
# Figure out the superclass which defines the slot; also figure out
# the tvar index that maps to the slot.
origin, tv = find_slot_origin(info, slot)
# Map self type to the superclass -> extract tvar with target index
# (only contains subclass tvars?? PROBABLY NOT).
selftype = self_type(info)
selftype = map_instance_to_supertype(selftype, origin)
tvar = selftype.args[tv - 1]
# Map tvar to an expression; refer to local vars instead of member
# vars always.
tvar = translate_runtime_type_vars_locally(tvar)
# Build the rvalue (initializer) expression
return TypeExpr(tvar)
    def make_type_object_wrapper(self, tdef: TypeDef) -> FuncDef:
        """Construct dynamically typed wrapper function for a class.

        It simply calls the type object and returns the result.
        """
        # TODO keyword args, default args and varargs
        # TODO overloads
        # Signature of the type object, with type variables erased so the
        # wrapper is fully dynamic.
        type_sig = cast(Callable, type_object_type(tdef.info, None))
        type_sig = cast(Callable, erasetype.erase_typevars(type_sig))
        init = cast(FuncDef, tdef.info.get_method('__init__'))
        arg_kinds = type_sig.arg_kinds
        # The wrapper function has a dynamically typed signature.
        wrapper_sig = Callable( [AnyType()] * len(arg_kinds),
                               arg_kinds, [None] * len(arg_kinds),
                               AnyType(), False)
        n = NameExpr(tdef.name) # TODO full name
        # Coerce the __init__ arguments (minus self) from the wrapper
        # signature to the type object signature.
        args = self.func_tf.call_args(
            init.args[1:],
            type_sig,
            wrapper_sig,
            True, False)
        call = CallExpr(n, args, arg_kinds)
        ret = ReturnStmt(call)
        # The wrapper is named after the class plus the dynamic suffix.
        fdef = FuncDef(tdef.name + self.tf.dynamic_suffix(),
                       init.args[1:],
                       arg_kinds, [None] * len(arg_kinds),
                       Block([ret]))
        fdef.type = wrapper_sig
        return fdef
    def self_type(self) -> Instance:
        # Self type of the class currently being transformed (delegates to
        # the module-level self_type helper).
        return self_type(self.tf.type_context())
    def make_scope(self) -> 'Scope':
        """Return a fresh temporary scope bound to the transform's type map."""
        return Scope(self.tf.type_map)
class Scope:
    """Temporary local name scope used during transformation.

    Tracks the Vars introduced while transforming a function and records
    the static type of every NameExpr it hands out in the shared type map.
    """
    def __init__(self, type_map: Dict[Node, Type]) -> None:
        self.names = {} # type: Dict[str, Var]
        self.type_map = type_map
    def add(self, name: str, type: Type) -> Var:
        """Register a new local variable with the given name and type."""
        var = Var(name)
        var.type = type
        self.names[name] = var
        return var
    def name_expr(self, name: str) -> NameExpr:
        """Build a local NameExpr referring to a previously added variable."""
        expr = NameExpr(name)
        expr.kind = nodes.LDEF
        target = self.names[name]
        expr.node = target
        self.type_map[expr] = target.type
        return expr
| 39.285498
| 79
| 0.560541
| 2,971
| 26,007
| 4.742174
| 0.129586
| 0.011924
| 0.012492
| 0.008517
| 0.27305
| 0.231812
| 0.196891
| 0.165519
| 0.144723
| 0.119881
| 0
| 0.002001
| 0.346714
| 26,007
| 661
| 80
| 39.344932
| 0.827251
| 0.245742
| 0
| 0.232984
| 0
| 0
| 0.017701
| 0.004146
| 0
| 0
| 0
| 0.003026
| 0.007853
| 1
| 0.073298
| false
| 0.007853
| 0.041885
| 0.005236
| 0.196335
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9134284163c48ec784f6cf8bb5ff49c9902c49ec
| 2,760
|
py
|
Python
|
classification/imaterialist_challenge_furniture_2018/configs/train/train_inceptionresnetv2_350_ssd_like_v3.py
|
vfdev-5/ignite-examples
|
fb15b59e2b159e1e2bc4628f8756055e9154f5c8
|
[
"MIT"
] | 11
|
2018-04-07T17:49:58.000Z
|
2022-03-15T07:18:18.000Z
|
classification/imaterialist_challenge_furniture_2018/configs/train/train_inceptionresnetv2_350_ssd_like_v3.py
|
vfdev-5/ignite-examples
|
fb15b59e2b159e1e2bc4628f8756055e9154f5c8
|
[
"MIT"
] | null | null | null |
classification/imaterialist_challenge_furniture_2018/configs/train/train_inceptionresnetv2_350_ssd_like_v3.py
|
vfdev-5/ignite-examples
|
fb15b59e2b159e1e2bc4628f8756055e9154f5c8
|
[
"MIT"
] | null | null | null |
# Basic training configuration file
from torch.optim import RMSprop
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.transforms import RandomHorizontalFlip, Compose
from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply
from torchvision.transforms import ColorJitter, ToTensor, Normalize
from common.dataset import FilesFromCsvDataset
from common.data_loaders import get_data_loader
from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3
# Global run settings.
SEED = 17
DEBUG = True
DEVICE = 'cuda'
OUTPUT_PATH = "output"
# Square image size (pixels) fed to the network.
size = 350
# Training-time augmentation pipeline.
TRAIN_TRANSFORMS = Compose([
    # Random affine jitter on half the samples; resample=3 selects a PIL
    # interpolation mode and fillcolor pads borders with white.
    RandomApply(
        [RandomAffine(degrees=10, resample=3, fillcolor=(255, 255, 255)), ],
        p=0.5
    ),
    RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),
    RandomHorizontalFlip(p=0.5),
    ColorJitter(hue=0.12, brightness=0.12),
    ToTensor(),
    # Scale tensors from [0, 1] to [-1, 1].
    Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
# NOTE(review): validation reuses the *training* transforms (random crops
# and flips included) — confirm this is intentional.
VAL_TRANSFORMS = TRAIN_TRANSFORMS
BATCH_SIZE = 24
NUM_WORKERS = 15
# Training data: file list read from a CSV manifest.
dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
                               data_transform=TRAIN_TRANSFORMS,
                               batch_size=BATCH_SIZE,
                               num_workers=NUM_WORKERS,
                               pin_memory='cuda' in DEVICE)
val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
                             data_transform=VAL_TRANSFORMS,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             pin_memory='cuda' in DEVICE)
MODEL = FurnitureInceptionResNetV4350SSDLike_v3(num_classes=128, pretrained='imagenet')
N_EPOCHS = 100
# Per-module learning rates: pretrained early stages get smaller rates
# than the newly added heads.
OPTIM = RMSprop(
    params=[
        {"params": MODEL.extractor.stem.parameters(), 'lr': 0.0001},
        {"params": MODEL.extractor.low_features_a.parameters(), 'lr': 0.00045},
        {"params": MODEL.extractor.low_features_b.parameters(), 'lr': 0.00045},
        {"params": MODEL.extractor.mid_features.parameters(), 'lr': 0.0045},
        {"params": MODEL.extractor.top_features.parameters(), 'lr': 0.0045},
        {"params": MODEL.extractor.smooth_layers.parameters(), 'lr': 0.045},
        {"params": MODEL.cls_layers.parameters(), 'lr': 0.045},
        {"params": MODEL.boxes_to_classes.parameters(), 'lr': 0.045},
        {"params": MODEL.final_classifier.parameters(), 'lr': 0.045},
    ],
    alpha=0.9,
    eps=1.0
)
# Halve all learning rates at each listed epoch.
LR_SCHEDULERS = [
    MultiStepLR(OPTIM, milestones=[4, 5, 6, 7, 8, 10, 11, 13, 14, 15], gamma=0.5),
]
EARLY_STOPPING_KWARGS = {
    'patience': 25,
    # 'score_function': None
}
LOG_INTERVAL = 100
| 30
| 87
| 0.659058
| 323
| 2,760
| 5.44582
| 0.396285
| 0.010233
| 0.066515
| 0.009096
| 0.308698
| 0.232518
| 0.210347
| 0.129619
| 0.078454
| 0.078454
| 0
| 0.06221
| 0.219565
| 2,760
| 91
| 88
| 30.32967
| 0.75441
| 0.02029
| 0
| 0.095238
| 0
| 0
| 0.068123
| 0.028878
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.126984
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913439b2a09a820bfc3faefc3e105469f128a1a8
| 1,352
|
py
|
Python
|
examples/qmmm/02-mcscf.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 501
|
2018-12-06T23:48:17.000Z
|
2022-03-31T11:53:18.000Z
|
examples/qmmm/02-mcscf.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 710
|
2018-11-26T22:04:52.000Z
|
2022-03-30T03:53:12.000Z
|
examples/qmmm/02-mcscf.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 273
|
2018-11-26T10:10:24.000Z
|
2022-03-30T12:25:28.000Z
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
# Nine-atom molecule in a 3-21G basis; verbose=4 prints SCF/MCSCF progress.
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
            basis='3-21g',
            verbose=4)
# Deterministic set of five MM point charges: random positions scaled by
# 10, charges -0.1, -0.2, ..., -0.5.
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to MCSCF method.
# The recommended one is to initialize it in SCF calculation. The MCSCF
# calculation takes the information from SCF objects.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object inplace.
#
# Reuse the converged orbitals from the first calculation as the initial
# guess for the patched-MCSCF runs.
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
| 22.915254
| 76
| 0.637574
| 242
| 1,352
| 3.533058
| 0.400826
| 0.035088
| 0.018713
| 0.02807
| 0.283041
| 0.276023
| 0.250292
| 0.14152
| 0.102924
| 0.102924
| 0
| 0.147619
| 0.223373
| 1,352
| 58
| 77
| 23.310345
| 0.666667
| 0.316568
| 0
| 0.375
| 0
| 0
| 0.330752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9136706832c51a492458e311e9d6b0efd4abea13
| 2,931
|
py
|
Python
|
vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | 29
|
2017-07-10T14:49:15.000Z
|
2022-02-02T23:14:38.000Z
|
vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 167
|
2015-03-17T14:45:22.000Z
|
2022-03-30T21:00:05.000Z
|
vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py
|
Tubbz-alt/Vesper
|
76e5931ca0c7fbe070c53b1362ec246ec9007beb
|
[
"MIT"
] | 4
|
2015-02-06T03:30:27.000Z
|
2020-12-27T08:38:52.000Z
|
"""
Module containing low score classifier for MPG Ranch NFC detectors.
An instance of the `Classifier` class of this module assigns the `LowScore`
classification to a clip if the clip has no `Classification` annotation and
has a `DetectorScore` annotation whose value is less than a threshold.
This classifier is intended for use on clips created by the the
MPG Ranch Thrush Detector 1.0 and the MPG Ranch Tseep Detector 1.0.
"""
import logging
from vesper.command.annotator import Annotator
from vesper.django.app.models import AnnotationInfo, StringAnnotation
_logger = logging.getLogger()
# NOTE(review): getLogger() with no argument returns the *root* logger —
# presumably logging.getLogger(__name__) was intended; confirm.
# Per-detector score thresholds; clips scoring below get `LowScore`.
_SCORE_THRESHOLDS = {
    # For 50 percent precision on validation recordings.
    'MPG Ranch Thrush Detector 1.0 40': 70,
    'MPG Ranch Tseep Detector 1.0 20': 41,
    # For 75 percent precision on validation recordings.
    # 'MPG Ranch Thrush Detector 1.0 40': 91,
    # 'MPG Ranch Tseep Detector 1.0 20': 63,
}
class Classifier(Annotator):
    """Low score classifier for MPG Ranch NFC detector clips.

    Assigns the `LowScore` classification to a clip that has no
    `Classification` annotation and whose `Detector Score` annotation
    value is below the threshold for the clip's detector.
    """
    extension_name = 'MPG Ranch NFC Detector Low Score Classifier 1.0'
    def __init__(
            self, annotation_info, creating_user=None, creating_job=None,
            creating_processor=None):
        super().__init__(
            annotation_info, creating_user, creating_job, creating_processor)
        # AnnotationInfo row that holds each clip's detector score.
        self._score_annotation_info = _get_annotation_info('Detector Score')
        # Maps detector name -> minimum score that avoids `LowScore`.
        self._score_thresholds = _SCORE_THRESHOLDS
    def annotate(self, clip):
        """Annotate `clip` as `LowScore` if unclassified and low-scoring.

        Returns True if the clip was annotated, False otherwise.
        """
        annotated = False
        classification = self._get_annotation_value(clip)
        if classification is None:
            # clip is unclassified
            score = self._get_score(clip)
            if score is not None:
                # clip has a detector score
                threshold = self._get_score_threshold(clip)
                if threshold is not None and score < threshold:
                    # detector score is below threshold
                    self._annotate(clip, 'LowScore')
                    annotated = True
        return annotated
    def _get_score(self, clip):
        """Return the clip's detector score as a float, or None if absent."""
        try:
            annotation = StringAnnotation.objects.get(
                clip=clip, info=self._score_annotation_info)
        except StringAnnotation.DoesNotExist:
            return None
        else:
            return float(annotation.value)
    def _get_score_threshold(self, clip):
        """Return the threshold for the clip's detector, or None if the
        clip has no creating processor or the detector is unknown."""
        detector = clip.creating_processor
        if detector is None:
            return None
        else:
            return self._score_thresholds.get(detector.name)
def _get_annotation_info(name):
    """Look up the AnnotationInfo row called `name`.

    Raises ValueError if no such annotation exists.
    """
    try:
        info = AnnotationInfo.objects.get(name=name)
    except AnnotationInfo.DoesNotExist:
        raise ValueError(
            'Unrecognized annotation "{}".'.format(name))
    return info
| 28.735294
| 77
| 0.616172
| 323
| 2,931
| 5.427245
| 0.315789
| 0.036509
| 0.034227
| 0.03765
| 0.128351
| 0.128351
| 0.10154
| 0.073018
| 0.073018
| 0.073018
| 0
| 0.017137
| 0.323098
| 2,931
| 101
| 78
| 29.019802
| 0.866431
| 0.234391
| 0
| 0.125
| 0
| 0
| 0.072197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0.0625
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91374929866f2c29362313f46503faaf0a90ed51
| 1,506
|
py
|
Python
|
setup.py
|
yitzikc/athena2pd
|
d2d6b886a70e958f51d90103600572152eaa7bb9
|
[
"MIT"
] | 1
|
2020-04-05T18:41:17.000Z
|
2020-04-05T18:41:17.000Z
|
setup.py
|
yitzikc/athena2pd
|
d2d6b886a70e958f51d90103600572152eaa7bb9
|
[
"MIT"
] | null | null | null |
setup.py
|
yitzikc/athena2pd
|
d2d6b886a70e958f51d90103600572152eaa7bb9
|
[
"MIT"
] | 1
|
2021-04-22T09:22:31.000Z
|
2021-04-22T09:22:31.000Z
|
from setuptools import setup, find_packages
def find_version(path):
    """Extract the package version from a Python source file.

    Reads *path* (a plain ASCII text file) and returns the string assigned
    to its module-level ``__version__`` variable.

    Raises RuntimeError if no ``__version__`` assignment is found.
    """
    import re
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(path, 'rt').read()` leaked the handle).
    with open(path, 'rt') as fh:
        contents = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              contents, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Version not found')
def get_requirements(filename):
    """Read a pip requirements file and return its entries as a list.

    Blank lines and comment lines are skipped — the original returned an
    empty string for every blank line, which is not a valid requirement
    specifier for setuptools' install_requires.
    """
    with open(filename, 'r') as fh:
        return [line.strip() for line in fh
                if line.strip() and not line.lstrip().startswith('#')]
def get_long_desc(filename):
    """Return the full contents of *filename* as a single string."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents
# Package metadata and build configuration.
setup(
    name='athena2pd',
    packages=['athena2pd'],
    # Single-source the version from the package's __init__.py.
    version=find_version('athena2pd/__init__.py'),
    description='Help\'s simplify the access of databases stored in Amazon Athena by using SQL and pandas DataFrames.',
    long_description=get_long_desc('README.md'),
    long_description_content_type='text/markdown',
    author='Joe Dementri',
    maintainer='Joe Dementri',
    maintainer_email='joedementri42012@gmail.com',
    license='MIT',
    install_requires=get_requirements('requirements.txt'),
    zip_safe=False,
    url='https://github.com/joedementri/athena2pd',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent'
    ],
    # NOTE(review): comma-separated python_requires specifiers are ANDed,
    # so '>=2.7,>=3.6' is equivalent to '>=3.6'; the classifiers suggest
    # "2.7 or 3.6+" was intended — confirm and fix upstream.
    python_requires='>=2.7,>=3.6'
)
| 33.466667
| 119
| 0.653386
| 183
| 1,506
| 5.224044
| 0.617486
| 0.037657
| 0.033473
| 0.050209
| 0.073222
| 0.073222
| 0.073222
| 0.073222
| 0
| 0
| 0
| 0.015075
| 0.207171
| 1,506
| 45
| 120
| 33.466667
| 0.785595
| 0.025232
| 0
| 0.051282
| 0
| 0
| 0.316973
| 0.032038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.051282
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913761b87b7ebbbec82bddc1bdba8144eb580e3d
| 436
|
py
|
Python
|
PythonBasics/ExamPreparation/FamilyTrip.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ExamPreparation/FamilyTrip.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ExamPreparation/FamilyTrip.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
# Family trip budget calculator: reads the available budget, number of
# nights, price per night and an extra-cost percentage from stdin, then
# reports whether the vacation fits the budget.
budget = float(input())
nights = int(input())
price_night = float(input())
percent_extra = int(input())
# Stays longer than 7 nights get a 5% discount on the nightly price.
if nights > 7:
    price_night = price_night - (price_night * 0.05)
# Total cost: accommodation plus the extra percentage taken of the budget.
# (Renamed from `sum`, which shadowed the builtin.)
total_cost = nights * price_night
total_sum = total_cost + (budget * percent_extra / 100)
if total_sum <= budget:
    print(f"Ivanovi will be left with {(budget - total_sum):.2f} leva after vacation.")
else:
    print(f"{(total_sum - budget):.2f} leva needed.")
| 29.066667
| 88
| 0.655963
| 63
| 436
| 4.365079
| 0.460317
| 0.181818
| 0.109091
| 0.145455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025714
| 0.197248
| 436
| 15
| 89
| 29.066667
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0.264775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91381ad1149218813852e6f68213b5362dda4a67
| 2,573
|
py
|
Python
|
tex_live_package_manager/progress.py
|
csch0/SublimeText-TeX-Live-Package-Manager
|
ab21bd49a945f611250613e9cb862a7703dc534f
|
[
"Unlicense",
"MIT"
] | 2
|
2018-11-03T16:15:59.000Z
|
2018-11-23T16:14:57.000Z
|
tex_live_package_manager/progress.py
|
csch0/SublimeText-TeX-Live-Package-Manager
|
ab21bd49a945f611250613e9cb862a7703dc534f
|
[
"Unlicense",
"MIT"
] | 1
|
2016-12-08T05:39:58.000Z
|
2016-12-08T05:39:58.000Z
|
tex_live_package_manager/progress.py
|
csch0/SublimeText-TeX-Live-Package-Manager
|
ab21bd49a945f611250613e9cb862a7703dc534f
|
[
"Unlicense",
"MIT"
] | null | null | null |
import sublime, sublime_plugin
import threading
class ProcessQueueManager():
    """Queue that runs work items on background threads while animating a
    progress indicator in the Sublime Text status bar.

    Uses the Borg pattern: every instance shares one attribute dict, so any
    part of the plugin can enqueue onto the same queue.
    """
    # Shared attribute dict for all instances (Borg pattern).
    __shared = {}
    # Pending work items: dicts with "function", "messages", "callback".
    items = []
    thread = None
    # Current item details
    messages = None
    function = None
    callback = None
    # Progress Bar preferences
    i = 0
    size = 8
    add = 1
    def __new__(cls, *args, **kwargs):
        inst = object.__new__(cls)
        inst.__dict__ = cls.__shared
        return inst
    def queue(self, unique_id, function, messages, callback):
        """Append a work item and kick off the polling loop if idle."""
        print(unique_id, function, messages, callback)
        self.items += [{"function": function, "messages": messages, "callback": callback}]
        if not self.thread or not self.thread.is_alive():
            sublime.set_timeout(lambda: self.run(), 100)
    def run(self):
        """Poll the worker thread, update progress, and start queued items."""
        # If thread available and running
        if self.thread and self.thread.is_alive():
            # Recall run
            self.progress()
            sublime.set_timeout(lambda: self.run(), 100)
        # Stop if thread available, not running and no item is available
        elif self.thread and not self.thread.is_alive() and not self.items:
            sublime.status_message(self.messages[1])
            # Callback
            sublime.set_timeout(self.callback, 0)
            # Reset progress details
            self.i = 0
            self.callback = None
            self.function = None
            # Bug fix: this previously assigned `self.message` — an attribute
            # that is never read anywhere — so the stale messages were kept.
            self.messages = None
        # If no thread available or not running
        elif not self.thread or not self.thread.is_alive():
            # Check for callback of old item
            if self.callback:
                sublime.set_timeout(self.callback, 0)
                self.callback = None
            # Queue available
            if self.items:
                item = self.items.pop(0)
                self.callback = item["callback"]
                self.function = item["function"]
                self.messages = item["messages"]
                # Start thread for current item
                self.thread = HelperThread(self.function)
                self.thread.start()
                # Call run to start updating progress
                sublime.set_timeout(lambda: self.run(), 100)
    def progress(self):
        """Advance the bouncing status-bar progress indicator one step."""
        # Calculate items on the left size
        before = self.i % self.size
        after = self.size - (before + 1)
        # Print the actual progress
        sublime.status_message('%s [%s=%s]' % (self.messages[0], ' ' * before, ' ' * after))
        # Invert increment if reached the end or start
        if not after:
            self.add = -1
        elif not before:
            self.add = 1
        self.i += self.add
class HelperThread(threading.Thread):
    """Background thread that invokes one callable, or each callable in a
    list, in order."""
    def __init__(self, function):
        # Normalize to a list so run() can always iterate.
        if isinstance(function, list):
            self.function = function
        else:
            self.function = [function]
        threading.Thread.__init__(self)
    def run(self):
        for task in self.function:
            task()
def ProgressFunction(function, messages, callback):
    # NOTE(review): this function appears broken/dead — neither ThreadThread
    # nor Progress is defined in this module (HelperThread is the only
    # thread class here), so calling it raises NameError. Confirm whether
    # it is unused leftover code or should be repaired.
    t = ThreadThread(function)
    t.start()
    Progress(t, messages[0], messages[1], callback)
| 23.605505
| 86
| 0.68869
| 351
| 2,573
| 4.940171
| 0.25641
| 0.05767
| 0.037486
| 0.039216
| 0.202422
| 0.153979
| 0.153979
| 0.11015
| 0.040369
| 0
| 0
| 0.011154
| 0.198601
| 2,573
| 109
| 87
| 23.605505
| 0.829777
| 0.169841
| 0
| 0.140625
| 0
| 0
| 0.028289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109375
| false
| 0
| 0.03125
| 0
| 0.328125
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91382da4e9ec5e3e22d31caf7faabb09a28c2093
| 10,199
|
py
|
Python
|
moscow_routes_parser/t_mos_ru.py
|
rscprof/moscow_routes_parser
|
692627dd43d62f70e3e12a761897571c79a022a0
|
[
"MIT"
] | null | null | null |
moscow_routes_parser/t_mos_ru.py
|
rscprof/moscow_routes_parser
|
692627dd43d62f70e3e12a761897571c79a022a0
|
[
"MIT"
] | null | null | null |
moscow_routes_parser/t_mos_ru.py
|
rscprof/moscow_routes_parser
|
692627dd43d62f70e3e12a761897571c79a022a0
|
[
"MIT"
] | null | null | null |
import html
import json
import logging
import re
from abc import abstractmethod
from datetime import datetime, time
from typing import Optional
import requests
from moscow_routes_parser.model import Route, Timetable, Equipment, Timetable_builder
from moscow_routes_parser.model_impl import Timetable_builder_t_mos_ru
class parser_timetable:
    """Interface for timetable parsers."""
    @abstractmethod
    def parse(self, text: str) -> Timetable_builder:
        # Turn raw HTML text into a populated Timetable_builder.
        pass
class parser_timetable_t_mos_ru(parser_timetable):
    """Parser for timetable from t.mos.ru implementation"""
    def __init__(self, builder: Timetable_builder):
        """Initialize parser

        :param builder: Builder for Timetable for route
        """
        # Stored as a zero-argument callable; parse() calls it and uses
        # type(...) to create a fresh builder of the same class per call.
        self.builder = lambda: builder
    def parse(self, text: str) -> Timetable_builder:
        """Parse text from https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoute (for format using
        2022-Jan-11)

        Since 12.01.2022 t.mos.ru drop data-services from results
        Since 13.03.2022 added flag_has_another_direction
        @param text: text for parse
        @return Timetable for route
        """
        # Fresh builder instance of the same type as the configured one.
        result_stops = type(self.builder())()
        # stops = re.finditer(r'data-stop="([^"]*?)".*?data-services="([^"]*?)".*?d-inline.*?>(.*?)<(.*?)</li>', text,
        #                     re.M + re.S
        #                     )
        # Per stop: group(1)=stop id, group(2)=stop name, group(3)=rest of
        # the <li> holding the hour-by-hour timetable markup.
        stops = re.finditer(r'data-stop="(.*?)".*?d-inline.*?>(.*?)<(.*?)</li>', text,
                            re.M + re.S
                            )
        data_coords_iter = re.finditer(r'data-coords="(.*?)"', text,
                                       re.M + re.S
                                       )
        data_coords_list = list(data_coords_iter)
        # The A<->B switch icon marks routes that have a second direction.
        if re.search(r'ic-change-a-b', text, re.M + re.S) is None:
            result_stops.set_has_another_direction(False)
        else:
            result_stops.set_has_another_direction(True)
        # if a timetable is present
        if len(data_coords_list) > 0:
            # data-coords is HTML-escaped GeoJSON; one coordinate pair per
            # stop, in document order.
            data_coords = data_coords_list[0].group(1)
            data_coords = html.unescape(data_coords)
            data_coords = json.loads(data_coords)['features']
            data_coords = iter(map(lambda feature: feature['geometry']['coordinates'], data_coords))
        else:
            data_coords = []
        for stop in stops:
            name_stop = stop.group(2)
            # NOTE(review): next() here assumes one coordinate per matched
            # stop; a shortfall raises (StopIteration, or TypeError when
            # data_coords is the empty list) — confirm server guarantees.
            coords_stop = next(data_coords)
            description = stop.group(3)
            logger = logging.getLogger(__name__)
            logger.info(name_stop)
            # Hour blocks: group(1) is the two-digit hour, group(2) the
            # minutes markup for that hour.
            hours = re.finditer(r'dt1.*?(\d\d):(.*?)</div>\s*</div>\s*</div>', description, re.M + re.S)
            timetable_stop = result_stops.add_stop()
            timetable_stop.set_name(name_stop)
            timetable_stop.set_coords(coords_stop)
            log_timetable = ""
            for hour in hours:
                num_hour = int(hour.group(1))
                minutes_text = hour.group(2)
                log_timetable += str(num_hour) + ": "
                minutes = re.finditer(r'div10([^>]*)>\s*(\d\d)', minutes_text, re.M + re.S)
                for minute in minutes:
                    num_minute = int(minute.group(2))
                    # An inline "color: ..." style marks special flights;
                    # extract the color value between 'color: ' and '"'.
                    color_start = minute.group(1).find('color: ')
                    if color_start >= 0:
                        quote = minute.group(1).find('"', color_start)
                        min_color = minute.group(1)[color_start + 7:quote]
                    else:
                        min_color = None
                    if not (min_color is None):
                        log_timetable += "{}{}".format(num_minute, min_color) + " "
                        pass
                    else:
                        log_timetable += str(num_minute) + " "
                        pass
                    time_flight = time(num_hour, num_minute)
                    timetable_stop.add_item_timetable(time_flight, min_color)
            logger.info(log_timetable)
        return result_stops
class Parser_routes:
    """Interface for route-list parsers."""
    @abstractmethod
    def parse(self, text: str) -> [Route]:
        # Turn a route-list HTML page into a list of Route objects.
        pass
class Parser_routes_t_mos_ru(Parser_routes):
    """Route-list parser for transport.mos.ru pages."""
    def __init__(self):
        # Total number of result pages; filled in by parse() from the
        # data-count-pages attribute, None until the first parse.
        self.count = None
    def parse(self, text: str) -> [Route]:
        """Parses route info from transport.mos.ru (name, id, type)

        :param text: text for parsing from t.mos.ru
        :return list of Route
        """
        # data-count-pages carries the total page count for pagination.
        count_result = re.finditer(r'data-count-pages="(\d+)"', text, re.M + re.S)
        self.count = int(list(count_result)[0].group(1))
        # Per route: group(1)=route id, group(2)=icon CSS class (vehicle
        # type), group(3)=route name.
        result = re.finditer(r'<a.*?href=.*?route/(.+?)".*?<div.*?ic[ ]([a-z-]+).*?</i>\s*(\S+?)\s*</div>', text,
                             re.M + re.S)
        list_routes = []
        for route in result:
            num = route.group(1)
            type_route = route.group(2)
            # Map the icon class to an Equipment kind.
            if type_route.find('-bus') >= 0:
                type_route = Equipment.bus()
            elif type_route.find('tramway') >= 0:
                type_route = Equipment.tramway()
            elif type_route.find('trolleybus') >= 0:
                type_route = Equipment.trolleybus()
            else:
                logging.getLogger(__name__).error("Unknown type route: {}".format(type_route))
                type_route = None
            name = route.group(3)
            list_routes.append(Route(num, type_route, name))
        return list_routes
def get_route(date: datetime.date, id_route_t_mos_ru: str, direction: int,
              get_route_url: str = 'https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoute',
              parser: parser_timetable = parser_timetable_t_mos_ru(builder=Timetable_builder_t_mos_ru())
              ) -> Timetable:
    """Get timetable for route by date and direction

    :param date: date of timetable for route
    :param id_route_t_mos_ru: id of route from t.mos.ru
    :param direction: direction for route (0 or 1)
    :param get_route_url URL for requesting timetable
    :param parser for timetable
    :return timetable for route by date and direction
    """
    # NOTE(review): the default `parser` is a mutable default argument,
    # constructed once at import time and shared across calls — confirm
    # parser instances are stateless.
    logger = logging.getLogger(__name__)
    try:
        # strange problem with SSL Cert in package
        response = requests.get(get_route_url,
                                params={
                                    'mgt_schedule[isNight]': '',
                                    'mgt_schedule[date]': date.strftime("%d.%m.%Y"),
                                    'mgt_schedule[route]': id_route_t_mos_ru,
                                    'mgt_schedule[direction]': direction,
                                },
                                headers={'X-Requested-With': 'XMLHttpRequest'}
                                )
        if response.status_code == 200:
            logger.info("Get route #{}".format(id_route_t_mos_ru))
            route_info = parser.parse(response.text)
        else:
            logger.error("Error status: {}".format(response.status_code))
            route_info = None
    except requests.exceptions.RequestException as e:
        logger.error("Error " + str(e))
        route_info = None
    if not (route_info is None):
        # Attach route identity to the parsed data and build the Timetable.
        result = route_info.set_id_route_t_mos_ru(id_route_t_mos_ru).set_direction(direction).set_date(date).build()
        if len(result.get_stops()) == 0:  # Error of loading timetable without exceptions
            result = None
    else:
        result = None
    return result
def get_list_routes(work_time: int, direction: int,
                    parser: Parser_routes = None,
                    get_routes_url: str = 'https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoutesList'
                    ) -> Optional[list[Route]]:
    """Get list of routes by work_time and direction from transport.mos.ru.

    Pages through the route-list endpoint until either an empty page is
    returned, a non-200 status arrives, or the page counter exceeds the
    page count reported by the first successfully parsed page.

    :param parser: parser for the returned HTML (defaults to Parser_routes_t_mos_ru)
    :param get_routes_url: url for requesting routes
    :param work_time: work day or not (1 or 0)
    :param direction: 0
    :return list of Route
    """
    if parser is None:
        parser = Parser_routes_t_mos_ru()
    page = 1
    result_routes = []
    finish = False
    count = None  # total page count; learned from the first parsed page
    logger = logging.getLogger(__name__)
    while not finish:
        finish = False
        repeat = True
        while repeat:
            repeat = False
            try:
                # strange problem with SSL Cert in package
                response = requests.get(get_routes_url,
                                        params={
                                            'mgt_schedule[search]': '',
                                            'mgt_schedule[isNight]': '',
                                            'mgt_schedule[work_time]': work_time,
                                            'page': page,
                                            'mgt_schedule[direction]': direction,
                                        },
                                        headers={'X-Requested-With': 'XMLHttpRequest'}
                                        )
                if response.status_code == 200:
                    logger.info("Get page #{}".format(page))
                    routes = parser.parse(response.text)
                    result_routes += routes
                    if count is None:
                        count = parser.count
                    if not routes:
                        finish = True
                else:
                    logger.error("Error status: {}".format(response.status_code))
                    finish = True
                page = page + 1
                # Bug fix: if the first request fails, `count` is still None
                # and `page > None` raises TypeError on Python 3 — only apply
                # the page-count bound once it is known.
                if count is not None and page > count:
                    finish = True
            except requests.exceptions.RequestException as e:
                # NOTE: a persistent network failure retries indefinitely
                # here; callers should expect this call to block until a
                # request succeeds or returns a status code.
                logger.error("Error " + str(e))
                repeat = True
    return result_routes
| 42.144628
| 295
| 0.531817
| 1,121
| 10,199
| 4.632471
| 0.199822
| 0.02022
| 0.018486
| 0.009243
| 0.289621
| 0.219141
| 0.176776
| 0.14404
| 0.122858
| 0.067013
| 0
| 0.022978
| 0.359937
| 10,199
| 241
| 296
| 42.319502
| 0.772518
| 0.166095
| 0
| 0.285714
| 0
| 0.005714
| 0.093085
| 0.038685
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045714
| false
| 0.022857
| 0.057143
| 0
| 0.148571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913b82f09ffffabfd9cdacbe8830d13b360f655c
| 6,762
|
py
|
Python
|
web/api/get_summary_data.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 2
|
2015-04-11T12:22:41.000Z
|
2016-08-18T11:12:06.000Z
|
web/api/get_summary_data.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 84
|
2015-01-22T14:33:49.000Z
|
2015-04-01T23:15:29.000Z
|
web/api/get_summary_data.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 1
|
2015-04-16T03:10:39.000Z
|
2015-04-16T03:10:39.000Z
|
from web.api import BaseAPI
from utils import mongo
import json
class DataApi(BaseAPI):
def __init__(self):
BaseAPI.__init__(self)
self._db = mongo.MongoInterface()
self.query = {}
self.fields = {
"donation_count": "$influences.electoral_commission.donation_count",
"donor_count": '$influences.electoral_commission.donor_count',
"donation_total_int": "$influences.electoral_commission.donation_total_int",
"mp_interest_relationships": "$influences.register_of_interests.relationship_count",
"lord_interest_relationships": "$influences.register_of_interests.interest_relationships",
"remuneration_count": "$influences.register_of_interests.remuneration_count",
"remuneration_total_int": "$influences.register_of_interests.remuneration_total_int",
"lobbyists_hired": "$influences.lobby_registers.lobbyist_hired"
}
def request(self, **args):
node_type = args.get("type")
category = args.get("category")
field = args.get("field")
summary = {
"influencers": self._influencers_aggregate(category, field),
#"lobby_agencies": self._influencers_aggregate(),
"political_parties": self._party_aggregate(category, field),
"mps": self._mp_aggregate(category, field),
"lords": self._lord_aggregate(category, field)
}
return {"children": summary[node_type][category]}
def _influencers_aggregate(self, category, field):
_db_table = 'api_influencers'
response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "influencer"),
"donation_count": self._format_top(top_count, "influencer", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = [
"remuneration_total_int",
"mp_interest_relationships",
"remuneration_count"
]
top_total, top_relationships, top_count = self._get_top(_db_table, reg_fields)
reg = {
"remuneration_total": self._format_top(top_total, "influencer"),
"interest_relationships": self._format_top(
top_relationships, "influencer", monetary=False
),
"remuneration_count": self._format_top(
top_count, "influencer", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _party_aggregate(self, category, field):
_db_table = 'api_political_parties'
response = {}
if category == "political_parties":
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
result = {
"donation_total": self._format_top(top_total, "party"),
"donation_count": self._format_top(top_count, "party", monetary=False)
}
response["electoral_commission"] = result[field]
return response
def _mp_aggregate(self, category, field):
_db_table = 'api_mps'
response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donor_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "mp"),
"donor_count": self._format_top(top_count, "mp", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = [
"remuneration_total_int",
"lord_interest_relationships",
"remuneration_count"
]
top_total, top_relationships, top_count = self._get_top(_db_table, reg_fields)
reg = {
"remuneration_total": self._format_top(top_total, "mp"),
"interest_relationships": self._format_top(
top_relationships, "mp", monetary=False
),
"remuneration_count": self._format_top(
top_count, "mp", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _lord_aggregate(self, category, field):
_db_table = 'api_lords'
response ={}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "lord"),
"donation_count": self._format_top(top_count, "lord", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = ["lord_interest_relationships"]
top_relationships = self._get_top(_db_table, reg_fields)[0]
reg = {
"interest_relationships": self._format_top(
top_relationships, "lord", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _format_top(self, results, label, monetary=True):
    """Convert raw top-N aggregation rows into API-ready dicts.

    Each entry gains a details URL; monetary totals additionally keep the
    raw integer under 'total_int' with a human-formatted 'total'.
    """
    formatted = []
    for entry in results:
        name = entry["_id"]
        item = {
            "name": name,
            "details_url": self.named_entity_resources(name, label)[0],
        }
        if monetary:
            item["total_int"] = entry["total"]
            item["total"] = self._format_number(entry["total"])
        else:
            item["total"] = entry["total"]
        formatted.append(item)
    return formatted
def _get_aggregate(self, table, field_list):
    """Return per-field sums from the given table, in field_list order."""
    sums = []
    for name in field_list:
        sums.append(self._db.sum(table, field=self.fields[name]))
    return sums
def _get_top(self, table, field_list):
    """Return per-field top-N rankings from the given table, in order."""
    rankings = []
    for name in field_list:
        rankings.append(self._db.top(table, field=self.fields[name]))
    return rankings
| 40.981818
| 102
| 0.584147
| 668
| 6,762
| 5.525449
| 0.136228
| 0.043349
| 0.052831
| 0.065023
| 0.661338
| 0.625034
| 0.597941
| 0.458683
| 0.458683
| 0.445137
| 0
| 0.000431
| 0.313665
| 6,762
| 164
| 103
| 41.231707
| 0.794872
| 0.034161
| 0
| 0.342857
| 0
| 0
| 0.247394
| 0.124157
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064286
| false
| 0
| 0.021429
| 0.014286
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913c3c69be248515aa6faa8629c29e1819e26c9e
| 21,616
|
py
|
Python
|
neutron/common/ovn/utils.py
|
guillermomolina/neutron
|
bd2933a2588d1e0b18790dd719ca1d89aa4a0c8d
|
[
"Apache-2.0"
] | 3
|
2021-02-17T09:49:14.000Z
|
2022-01-19T08:40:34.000Z
|
neutron/common/ovn/utils.py
|
guillermomolina/neutron
|
bd2933a2588d1e0b18790dd719ca1d89aa4a0c8d
|
[
"Apache-2.0"
] | null | null | null |
neutron/common/ovn/utils.py
|
guillermomolina/neutron
|
bd2933a2588d1e0b18790dd719ca1d89aa4a0c8d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import inspect
import os
import re
import netaddr
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net as n_utils
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import strutils
from ovsdbapp import constants as ovsdbapp_const
from neutron._i18n import _
from neutron.common.ovn import constants
from neutron.common.ovn import exceptions as ovn_exc
from neutron.db import models_v2
from neutron.objects import ports as ports_obj
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# System resolver configuration read by get_system_dns_resolvers().
DNS_RESOLVER_FILE = "/etc/resolv.conf"
# Diff of allowed_address_pairs between Neutron and OVN:
# 'added'/'removed' are sets of IPs, 'changed' is a bool.
AddrPairsDiff = collections.namedtuple(
    'AddrPairsDiff', ['added', 'removed', 'changed'])
# Outcome of extra-DHCP-option validation: a 'failed' flag plus the
# per-IP-version lists of unsupported option names.
PortExtraDHCPValidation = collections.namedtuple(
    'PortExtraDHCPValidation', ['failed', 'invalid_ipv4', 'invalid_ipv6'])
def ovn_name(id):
    """Return the OVN entry name for a Neutron UUID: neutron-<UUID>.

    OVN treats bare-UUID names specially (it checks whether a name is a
    UUID), so the Neutron prefix keeps lookups by Neutron UUID working
    for updates, deletes, etc.
    """
    return f"{constants.OVN_NAME_PREFIX}{id}"
def ovn_lrouter_port_name(id):
    """Return the OVN logical router port name: lrp-<UUID>.

    Distinguishes the lrouter port from the connected lswitch patch port
    (named with the bare Neutron port UUID) so OVS patch ports pair up as
    patch-lrp-<UUID>-to-<UUID> / patch-<UUID>-to-lrp-<UUID>.
    """
    return f"{constants.LRP_PREFIX}{id}"
def ovn_cr_lrouter_port_name(_id):
    """Return the OVN chassisredirect lrouter port name: cr-lrp-<UUID>."""
    return f"cr-lrp-{_id}"
def ovn_provnet_port_name(network_id):
    """Return the OVN provider-network port name: provnet-<Network-UUID>.

    This port is created for networks carrying the
    provider:physical_network attribute.
    """
    return f"{constants.OVN_PROVNET_PORT_NAME_PREFIX}{network_id}"
def ovn_vhu_sockpath(sock_dir, port_id):
    """Return the vhost-user socket path for a port.

    The filename becomes the virtio port name, so it is truncated to 14
    characters to stay within IFNAMSIZ(16).
    """
    sock_name = (const.VHOST_USER_DEVICE_PREFIX + port_id)[:14]
    return os.path.join(sock_dir, sock_name)
def ovn_addrset_name(sg_id, ip_version):
    """Return the address-set name for a security group and IP version.

    Format is as-<ip version>-<sg uuid> with every '-' replaced by '_',
    because OVN does not allow '-' in address set names.
    """
    raw = f"as-{ip_version}-{sg_id}"
    return raw.replace('-', '_')
def ovn_pg_addrset_name(sg_id, ip_version):
    """Return the address-set name for a security group modelled as a
    Port Group: pg-<sg uuid>-<ip version>, '-' replaced by '_' since OVN
    forbids '-' in address set names.
    """
    raw = f"pg-{sg_id}-{ip_version}"
    return raw.replace('-', '_')
def ovn_port_group_name(sg_id):
    """Return the port group name for a security group: pg-<sg uuid>,
    with '-' replaced by '_'."""
    return f"pg-{sg_id}".replace('-', '_')
def is_network_device_port(port):
    """True when the port's device_owner marks it as a network device."""
    owner = port.get('device_owner', '')
    return owner.startswith(const.DEVICE_OWNER_PREFIXES)
def _is_dhcp_disabled(dhcp_opt):
    """True when the extra-DHCP-option entry disables DHCP for the port."""
    if dhcp_opt['opt_name'] != constants.DHCP_DISABLED_OPT:
        return False
    return dhcp_opt.get('opt_value', '').lower() == 'true'
def validate_port_extra_dhcp_opts(port):
    """Validate port's extra DHCP options.

    :param port: A neutron port.
    :returns: A PortExtraDHCPValidation object.
    """
    invalid = {const.IP_VERSION_4: [], const.IP_VERSION_6: []}
    failed = False
    for opt in port.get(edo_ext.EXTRADHCPOPTS, []):
        # If DHCP is disabled for this port via this special option,
        # validation always succeeds.
        if _is_dhcp_disabled(opt):
            failed = False
            break
        version = opt['ip_version']
        name = opt['opt_name']
        if name not in constants.SUPPORTED_DHCP_OPTS_MAPPING[version]:
            invalid[version].append(name)
            failed = True
    return PortExtraDHCPValidation(
        failed=failed,
        invalid_ipv4=invalid[const.IP_VERSION_4] if failed else [],
        invalid_ipv6=invalid[const.IP_VERSION_6] if failed else [])
def get_lsp_dhcp_opts(port, ip_version):
    """Extract DHCP options from a Neutron port for the OVN DHCP_Options row.

    :param port: A neutron port dict.
    :param ip_version: 4 or 6; only options for this version are read.
    :returns: Tuple (lsp_dhcp_disabled, lsp_dhcp_opts) where the first
        element says whether OVN-native DHCP is disabled for the port and
        the second maps OVN option names to values.
    """
    lsp_dhcp_disabled = False
    lsp_dhcp_opts = {}
    if is_network_device_port(port):
        # Network device ports never get OVN-native DHCP.
        lsp_dhcp_disabled = True
    else:
        mapping = constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]
        for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
            if edo['ip_version'] != ip_version:
                continue
            if _is_dhcp_disabled(edo):
                # OVN native DHCP is disabled on this port
                lsp_dhcp_disabled = True
                # Make sure return value behavior not depends on the order
                # and content of the extra DHCP options for the port
                lsp_dhcp_opts.clear()
                break
            if edo['opt_name'] not in mapping:
                # BUG FIX: corrected typo "suppported" in the log message.
                LOG.warning('The DHCP option %(opt_name)s on port %(port)s '
                            'is not supported by OVN, ignoring it',
                            {'opt_name': edo['opt_name'], 'port': port['id']})
                continue
            opt = mapping[edo['opt_name']]
            lsp_dhcp_opts[opt] = edo['opt_value']
    return (lsp_dhcp_disabled, lsp_dhcp_opts)
def is_lsp_trusted(port):
    """True when the port has a device_owner and neutron-lib deems it trusted."""
    if not port.get('device_owner'):
        return False
    return n_utils.is_port_trusted(port)
def is_lsp_ignored(port):
    """True for ports OVN should not create/update.

    Floating IP ports are not bound to any chassis, so packets from VMs
    to the floating IP would be dropped; such ports are skipped in OVN.
    """
    return port.get('device_owner') == const.DEVICE_OWNER_FLOATINGIP
def get_lsp_security_groups(port, skip_trusted_port=True):
    """Return the port's security groups, optionally skipping trusted ports.

    Unlike the OVS agent, there is no security-groups RPC step here, so
    trusted-port skipping happens in this helper.
    """
    if skip_trusted_port and is_lsp_trusted(port):
        return []
    return port.get('security_groups', [])
def is_snat_enabled(router):
    """Return the router's enable_snat flag (defaults to True)."""
    gw_info = router.get(l3.EXTERNAL_GW_INFO, {})
    return gw_info.get('enable_snat', True)
def is_port_security_enabled(port):
    # Returns the raw port_security attribute value (may be None/absent
    # for ports without the extension); callers use it as a truth value.
    return port.get(psec.PORTSECURITY)
def is_security_groups_enabled(port):
    # Returns the port's security-groups attribute (a list, or None when
    # absent); callers use it as a truth value.
    return port.get(constants.PORT_SECURITYGROUPS)
def validate_and_get_data_from_binding_profile(port):
    """Validate the port's binding:profile and return its parameters.

    Tries each known parameter-set in OVN_PORT_BINDING_PROFILE_PARAMS;
    the first set that matches any key of the profile must match fully
    and exactly, otherwise InvalidInput is raised. Returns {} when the
    profile is absent/unset or matches no parameter set.
    """
    if (constants.OVN_PORT_BINDING_PROFILE not in port or
            not validators.is_attr_set(
                port[constants.OVN_PORT_BINDING_PROFILE])):
        return {}
    param_set = {}
    param_dict = {}
    # NOTE: param_set deliberately leaks out of the loop below — after
    # the break it holds the parameter set that matched, and the
    # type/parent_name/tag checks later rely on that.
    for param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS:
        param_keys = param_set.keys()
        for param_key in param_keys:
            try:
                param_dict[param_key] = (port[
                    constants.OVN_PORT_BINDING_PROFILE][param_key])
            except KeyError:
                pass
        if len(param_dict) == 0:
            # No keys of this set present; try the next parameter set.
            continue
        if len(param_dict) != len(param_keys):
            # Partial match: some required keys of this set are missing.
            msg = _('Invalid binding:profile. %s are all '
                    'required.') % param_keys
            raise n_exc.InvalidInput(error_message=msg)
        if (len(port[constants.OVN_PORT_BINDING_PROFILE]) != len(
                param_keys)):
            # Extra keys beyond this parameter set are not allowed.
            msg = _('Invalid binding:profile. too many parameters')
            raise n_exc.InvalidInput(error_message=msg)
        break
    if not param_dict:
        return {}
    # Type-check each collected value against the matched set's types.
    for param_key, param_type in param_set.items():
        if param_type is None:
            continue
        param_value = param_dict[param_key]
        if not isinstance(param_value, param_type):
            msg = _('Invalid binding:profile. %(key)s %(value)s '
                    'value invalid type') % {'key': param_key,
                                             'value': param_value}
            raise n_exc.InvalidInput(error_message=msg)
    # Make sure we can successfully look up the port indicated by
    # parent_name. Just let it raise the right exception if there is a
    # problem.
    if 'parent_name' in param_set:
        plugin = directory.get_plugin()
        plugin.get_port(n_context.get_admin_context(),
                        param_dict['parent_name'])
    if 'tag' in param_set:
        tag = int(param_dict['tag'])
        if tag < 0 or tag > 4095:
            # Valid VLAN tags fit in 12 bits.
            msg = _('Invalid binding:profile. tag "%s" must be '
                    'an integer between 0 and 4095, inclusive') % tag
            raise n_exc.InvalidInput(error_message=msg)
    return param_dict
def is_dhcp_options_ignored(subnet):
    """True for v6 SLAAC subnets: no DHCP_Options row is inserted since
    DHCPv6 should not operate in that address mode."""
    if subnet['ip_version'] != const.IP_VERSION_6:
        return False
    return subnet.get('ipv6_address_mode') == const.IPV6_SLAAC
def get_ovn_ipv6_address_mode(address_mode):
    # Map a Neutron ipv6 address mode to its OVN equivalent; raises
    # KeyError for unknown modes.
    return constants.OVN_IPV6_ADDRESS_MODES[address_mode]
def get_revision_number(resource, resource_type):
    """Get the resource's revision number based on its type."""
    supported_types = (
        constants.TYPE_NETWORKS,
        constants.TYPE_PORTS,
        constants.TYPE_SECURITY_GROUP_RULES,
        constants.TYPE_ROUTERS,
        constants.TYPE_ROUTER_PORTS,
        constants.TYPE_SECURITY_GROUPS,
        constants.TYPE_FLOATINGIPS,
        constants.TYPE_SUBNETS,
    )
    if resource_type not in supported_types:
        raise ovn_exc.UnknownResourceType(resource_type=resource_type)
    return resource['revision_number']
def remove_macs_from_lsp_addresses(addresses):
    """Remove the mac addresses from the Logical_Switch_Port addresses column.

    :param addresses: The list of addresses from the Logical_Switch_Port.
        Example: ["80:fa:5b:06:72:b7 158.36.44.22",
                  "ff:ff:ff:ff:ff:ff 10.0.0.2"]
    :returns: A list of IP addresses (v4 and v6)
    """
    ip_list = []
    for addr in addresses:
        for token in addr.split():
            # Keep only tokens that parse as IPv4/IPv6; drops MACs.
            if netutils.is_valid_ipv4(token) or netutils.is_valid_ipv6(token):
                ip_list.append(token)
    return ip_list
def get_allowed_address_pairs_ip_addresses(port):
    """Return a list of IP addresses from port's allowed_address_pairs.

    :param port: A neutron port
    :returns: A list of IP addresses (v4 and v6)
    """
    pairs = port.get('allowed_address_pairs', [])
    return [pair['ip_address'] for pair in pairs if 'ip_address' in pair]
def get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port):
    """Return a list of IP addresses from an OVN port.

    Reconstructs the equivalent of Neutron's allowed_address_pairs from
    the OVN port: IPs present in port_security but not in addresses.

    :param ovn_port: A OVN port
    :returns: A list of IP addresses (v4 and v6)
    """
    lsp_addresses = remove_macs_from_lsp_addresses(ovn_port.addresses)
    return [ip
            for ip in remove_macs_from_lsp_addresses(ovn_port.port_security)
            if ip not in lsp_addresses]
def get_ovn_port_security_groups(ovn_port, skip_trusted_port=True):
    """Return the security groups recorded on an OVN port's external_ids."""
    external_ids = ovn_port.external_ids
    info = {
        'security_groups': external_ids.get(
            constants.OVN_SG_IDS_EXT_ID_KEY, '').split(),
        'device_owner': external_ids.get(
            constants.OVN_DEVICE_OWNER_EXT_ID_KEY, ''),
    }
    return get_lsp_security_groups(info, skip_trusted_port=skip_trusted_port)
def get_ovn_port_addresses(ovn_port):
    """Return the deduplicated union of the port's addresses and
    port_security IPs (MACs stripped); order is unspecified."""
    merged = set(remove_macs_from_lsp_addresses(ovn_port.addresses))
    merged.update(remove_macs_from_lsp_addresses(ovn_port.port_security))
    return list(merged)
def sort_ips_by_version(addresses):
    """Bucket addresses into {'ip4': [...], 'ip6': [...]} by IP version."""
    buckets = {'ip4': [], 'ip6': []}
    for address in addresses:
        version = netaddr.IPNetwork(address).version
        buckets['ip%d' % version].append(address)
    return buckets
def is_lsp_router_port(port):
    # True when the port's device_owner is one of the router-port owners.
    return port.get('device_owner') in const.ROUTER_PORT_OWNERS
def get_lrouter_ext_gw_static_route(ovn_router):
    """Return the router's static routes flagged as external-gateway routes."""
    ext_gw_routes = []
    for route in getattr(ovn_router, 'static_routes', []):
        ext_ids = getattr(route, 'external_ids', {})
        if strutils.bool_from_string(
                ext_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false')):
            ext_gw_routes.append(route)
    return ext_gw_routes
def get_lrouter_snats(ovn_router):
    """Return the router's NAT rules of type 'snat'."""
    snats = []
    for nat_rule in getattr(ovn_router, 'nat', []):
        if nat_rule.type == 'snat':
            snats.append(nat_rule)
    return snats
def get_lrouter_non_gw_routes(ovn_router):
    """Return {'destination', 'nexthop'} dicts for every static route that
    is NOT flagged as an external-gateway route."""
    routes = []
    for route in getattr(ovn_router, 'static_routes', []):
        ext_ids = getattr(route, 'external_ids', {})
        is_ext_gw = strutils.bool_from_string(
            ext_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false'))
        if not is_ext_gw:
            routes.append({'destination': route.ip_prefix,
                           'nexthop': route.nexthop})
    return routes
def is_ovn_l3(l3_plugin):
    """True when the L3 plugin carries an OVN client instance attribute."""
    try:
        l3_plugin._ovn_client_inst
    except AttributeError:
        return False
    return True
def get_system_dns_resolvers(resolver_file=DNS_RESOLVER_FILE):
    """Return the IPv4 nameserver addresses listed in resolver_file.

    Missing file yields []; IPv6 nameservers are not matched by the
    regex and are skipped.
    """
    resolvers = []
    if not os.path.exists(resolver_file):
        return resolvers
    ipv4_pattern = re.compile(r'^(?:[0-9]{1,3}\.){3}[0-9]{1,3}')
    with open(resolver_file, 'r') as rconf:
        for line in rconf:
            if not line.startswith('nameserver'):
                continue
            candidate = line.split('nameserver')[1].strip()
            matched = ipv4_pattern.search(candidate)
            if matched:
                resolvers.append(matched.group(0))
    return resolvers
def get_port_subnet_ids(port):
    """Return the subnet IDs of all the port's fixed IPs."""
    return [fixed_ip['subnet_id'] for fixed_ip in port['fixed_ips']]
def get_method_class(method):
    """Return the class of a bound method, or None for non-methods."""
    if inspect.ismethod(method):
        return method.__self__.__class__
    return None
def ovn_metadata_name(id_):
    """Return the OVN metadata name based on an id."""
    return f"metadata-{id_}"
def is_gateway_chassis_invalid(chassis_name, gw_chassis,
                               physnet, chassis_physnets):
    """Check whether a gateway chassis is invalid.

    :param chassis_name: gateway chassis name
    :param gw_chassis: list of gateway chassis in the system
    :param physnet: physical network associated to chassis_name
    :param chassis_physnets: dict mapping chassis names to their physnets
    :returns: True when the chassis is the invalid sentinel, unknown,
        missing the physnet, or absent from the gateway list.
    """
    if chassis_name == constants.OVN_GATEWAY_INVALID_CHASSIS:
        return True
    if chassis_name not in chassis_physnets:
        return True
    if physnet and physnet not in chassis_physnets.get(chassis_name):
        return True
    if gw_chassis and chassis_name not in gw_chassis:
        return True
    return False
def is_provider_network(network):
    # True when the network dict is flagged as external; defaults False.
    return network.get(external_net.EXTERNAL, False)
def is_neutron_dhcp_agent_port(port):
    """Check if the given DHCP port belongs to Neutron DHCP agents.

    Agent-owned DHCP ports have device_id equal to the reserved DHCP
    port id or starting with 'dhcp'.
    """
    if port['device_owner'] != const.DEVICE_OWNER_DHCP:
        return False
    device_id = port['device_id']
    return (device_id == const.DEVICE_ID_RESERVED_DHCP_PORT or
            device_id.startswith('dhcp'))
def compute_address_pairs_diff(ovn_port, neutron_port):
    """Compute the differences in the allowed_address_pairs field."""
    ovn_ips = set(
        get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port))
    neutron_ips = set(get_allowed_address_pairs_ip_addresses(neutron_port))
    added = neutron_ips - ovn_ips
    removed = ovn_ips - neutron_ips
    return AddrPairsDiff(added, removed, changed=any(added or removed))
def get_ovn_cms_options(chassis):
    """Return the list of CMS options in a Chassis."""
    raw_options = chassis.external_ids.get(constants.OVN_CMS_OPTIONS, '')
    return [opt.strip() for opt in raw_options.split(',')]
def is_gateway_chassis(chassis):
    """Check if the given chassis is a gateway chassis."""
    options = get_ovn_cms_options(chassis)
    return constants.CMS_OPT_CHASSIS_AS_GW in options
def get_port_capabilities(port):
    """Return a list of port's capabilities"""
    # Read from the binding profile; [] when the profile or key is absent.
    return port.get(portbindings.PROFILE, {}).get('capabilities', [])
def get_port_id_from_gwc_row(row):
    """Return a port_id from gwc row

    The Gateway_Chassis row stores router port_id in
    the row name attribute:

        <prefix>-<port_id>_<chassis_id>

    :param row: A Gateway_Chassis table row.
    :returns: String containing router port_id.
    """
    # NOTE: raises AttributeError if the name does not match the
    # expected pattern (search() returns None).
    return constants.RE_PORT_FROM_GWC.search(row.name).group(2)
def get_chassis_availability_zones(chassis):
    """Return a set of availability zones from a given OVN Chassis."""
    azs = set()
    if not chassis:
        return azs
    az_prefix = constants.CMS_OPT_AVAILABILITY_ZONES + '='
    for option in get_ovn_cms_options(chassis):
        if option.startswith(az_prefix):
            # Value is a colon-separated AZ list after the '='.
            values = option.split('=')[1]
            azs = {az.strip() for az in values.split(':') if az.strip()}
            break
    return azs
def get_chassis_in_azs(chassis_list, az_list):
    """Return the names of Chassis that belong to one or more of the AZs.

    :param chassis_list: A list of Chassis objects
    :param az_list: A list of availability zones
    :returns: A set of Chassis names
    """
    return {ch.name for ch in chassis_list
            if get_chassis_availability_zones(ch).intersection(az_list)}
def get_gateway_chassis_without_azs(chassis_list):
    """Return the names of gateway Chassis not assigned to any AZ.

    :param chassis_list: A list of Chassis objects
    :returns: A set of Chassis names
    """
    names = set()
    for ch in chassis_list:
        if is_gateway_chassis(ch) and not get_chassis_availability_zones(ch):
            names.add(ch.name)
    return names
def parse_ovn_lb_port_forwarding(ovn_rtr_lb_pfs):
    """Return a dictionary compatible with port forwarding from OVN lb.

    Result shape: {fip_id: {protocol: {"<vip> <ip>", ...}}}.
    """
    result = {}
    for ovn_lb in ovn_rtr_lb_pfs:
        fip_id = ovn_lb.external_ids.get(constants.OVN_FIP_EXT_ID_KEY)
        if ovn_lb.protocol:
            protocol = ovn_lb.protocol[0]
        else:
            # OVN leaves protocol unset for TCP.
            protocol = ovsdbapp_const.PROTO_TCP
        fip_dict = result.setdefault(fip_id, {})
        fip_dict_proto = fip_dict.setdefault(protocol, set())
        for vip, ips in ovn_lb.vips.items():
            for ip in ips.split(','):
                fip_dict_proto.add("{} {}".format(vip, ip))
    return result
def get_network_name_from_datapath(datapath):
    """Return the Neutron network name by stripping the 'neutron-' prefix
    from the datapath's external_ids name."""
    lswitch_name = datapath.external_ids['name']
    return lswitch_name.replace('neutron-', '')
def is_port_external(port):
    """Return whether the port maps to lsp.type=external in the OVN DB.

    Accepts a port dict, a models_v2.Port DB object, or a ports_obj.Port
    OVO; a port is external when its vnic_type is one of the external
    port types and it lacks the switchdev capability.
    """
    # This port is represented in OVN DB as lsp.type=external
    capabilities = []
    vnic_type = portbindings.VNIC_NORMAL
    if isinstance(port, dict):
        # Plain port dict: read capabilities/vnic_type directly.
        capabilities = get_port_capabilities(port)
        vnic_type = port.get(portbindings.VNIC_TYPE,
                             portbindings.VNIC_NORMAL)
    else:
        if isinstance(port, models_v2.Port):
            bindings = port.port_bindings
        elif isinstance(port, ports_obj.Port):
            bindings = port.bindings
        else:  # What else could be "port"?
            bindings = []
        if bindings:
            profile = bindings[0].get('profile')
            if profile:
                # DB object, not OVO, stores the dict in JSON.
                profile = (jsonutils.loads(profile) if isinstance(profile, str)
                           else profile)
                capabilities = profile.get('capabilities', [])
            vnic_type = bindings[0].get('vnic_type',
                                        portbindings.VNIC_NORMAL)
    return (vnic_type in constants.EXTERNAL_PORT_TYPES and
            constants.PORT_CAP_SWITCHDEV not in capabilities)
| 35.320261
| 79
| 0.672141
| 3,022
| 21,616
| 4.570483
| 0.166446
| 0.01267
| 0.007095
| 0.006082
| 0.252534
| 0.17854
| 0.147191
| 0.092963
| 0.065957
| 0.059079
| 0
| 0.005789
| 0.24084
| 21,616
| 611
| 80
| 35.378069
| 0.835893
| 0.263462
| 0
| 0.125731
| 0
| 0
| 0.068229
| 0.00475
| 0
| 0
| 0
| 0
| 0
| 1
| 0.149123
| false
| 0.002924
| 0.078947
| 0.070175
| 0.400585
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913ca9c4582e3db5d9a5c8dc80fedece649fbdb9
| 1,082
|
py
|
Python
|
Submods/MAS Additions/MASM/scripts/midi_input.py
|
CaptainHorse/MAS-Additions
|
5714aaf8cfa3c57432f6231795cbe1d75df46f74
|
[
"MIT"
] | 13
|
2019-09-24T00:09:17.000Z
|
2022-02-26T20:24:18.000Z
|
Submods/MAS Additions/MASM/scripts/midi_input.py
|
CaptainHorse/MAS-Additions
|
5714aaf8cfa3c57432f6231795cbe1d75df46f74
|
[
"MIT"
] | 30
|
2019-06-28T03:16:33.000Z
|
2022-01-19T11:49:59.000Z
|
Submods/MAS Additions/MASM/scripts/midi_input.py
|
CaptainHorse/MAS-Additions
|
5714aaf8cfa3c57432f6231795cbe1d75df46f74
|
[
"MIT"
] | 4
|
2019-10-04T01:59:17.000Z
|
2022-02-26T20:24:20.000Z
|
import mido
from socketer import MASM
# Handle to the opened mido input port; None until Start() succeeds.
inPort = None
# Whether MIDI note messages are forwarded (toggled by MASM start/stop events).
doReadInput = False
def Start():
    """Open the default MIDI input port; on failure leave inPort as None."""
    global inPort
    try:
        print(f"MIDI inputs: {mido.get_input_names()}")
        inPort = mido.open_input()
        print(f"MIDI input open: {inPort}")
    except Exception as e:
        # Keep running without MIDI; Update() checks inPort for None.
        inPort = None
        print(f"Could not open MIDI input: {e}")
def Update():
    """Poll pending MIDI messages and forward them to MASM.

    MIDI_STOP/MIDI_START events toggle forwarding; a pending
    MIDI_KEYMAPKEY request captures the next key for key mapping.
    Pending messages are always drained so stale events are not delivered
    when input is re-enabled.

    Fix: renamed the local variable 'bytes' to 'msg_bytes' — it shadowed
    the builtin 'bytes' type.
    """
    global inPort
    global doReadInput
    if inPort is not None:
        if doReadInput and MASM.hasDataBool("MIDI_STOP"):
            doReadInput = False
        elif not doReadInput and MASM.hasDataBool("MIDI_START"):
            doReadInput = True
        for msg in inPort.iter_pending():
            if MASM.hasDataCheck("MIDI_KEYMAPKEY"):
                msg_bytes = msg.bytes()
                if len(msg_bytes) >= 3:
                    # Consume the one-shot keymap request flag.
                    MASM.hasDataBool("MIDI_KEYMAPKEY")
                    MASM.sendData("MIDI_KEY", msg_bytes[1])
            elif doReadInput:  # We want to clear old pending messages but not send them if input is disabled
                msg_bytes = msg.bytes()
                if len(msg_bytes) >= 3:
                    # 144 == note-on (velocity > 0), 128 == note-off;
                    # note-on with velocity 0 also means note-off.
                    if msg_bytes[0] == 144 and msg_bytes[2] > 0:
                        MASM.sendData(f"MIDI_NOTE.{msg_bytes[1]}", msg_bytes[2])
                    elif msg_bytes[0] == 128 or msg_bytes[2] == 0:
                        MASM.sendData(f"MIDI_NOTE.{msg_bytes[1]}", 0)
| 29.243243
| 99
| 0.677449
| 162
| 1,082
| 4.45679
| 0.395062
| 0.027701
| 0.078947
| 0.080332
| 0.252078
| 0.160665
| 0.160665
| 0.094183
| 0.094183
| 0.094183
| 0
| 0.021814
| 0.195009
| 1,082
| 37
| 100
| 29.243243
| 0.807118
| 0.07024
| 0
| 0.294118
| 0
| 0
| 0.18607
| 0.023881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913f9ce1958e1ba194c9448681b6fa2b1b835522
| 1,668
|
py
|
Python
|
baseplate_py_upgrader/docker.py
|
reddit/baseplate.py-upgrader
|
2e4b019de7c22e2d2467eba488867fe81d7d5fc1
|
[
"BSD-3-Clause"
] | 6
|
2020-07-09T02:25:23.000Z
|
2021-09-24T17:28:41.000Z
|
baseplate_py_upgrader/docker.py
|
Seanpm2001-reddit/baseplate.py-upgrader
|
a554418c638022b461cf5cae17e894280cf76a25
|
[
"BSD-3-Clause"
] | 9
|
2019-08-13T20:29:04.000Z
|
2022-03-04T19:11:47.000Z
|
baseplate_py_upgrader/docker.py
|
Seanpm2001-reddit/baseplate.py-upgrader
|
a554418c638022b461cf5cae17e894280cf76a25
|
[
"BSD-3-Clause"
] | 4
|
2020-12-11T21:59:37.000Z
|
2022-03-04T00:10:43.000Z
|
import logging
import re
from pathlib import Path
from typing import Match
logger = logging.getLogger(__name__)
# Matches baseplate-py Docker image references such as
# "/baseplate-py:1.2-py3.8-bionic-artifactory-dev", capturing the
# version, python, distro, optional repo suffix, and optional dev suffix.
IMAGE_RE = re.compile(
    r"/baseplate-py:(?P<version>[0-9.]+(\.[0-9]+)?)-py(?P<python>[23]\.[0-9]+)-(?P<distro>(bionic|buster))(?P<repo>-artifactory)?(?P<dev>-dev)?"
)
def upgrade_docker_image_references_in_file(target_series: str, filepath: Path) -> None:
    """Rewrite baseplate-py Docker image references in one file.

    :param target_series: target baseplate series, e.g. "2.1" or "0.29".
    :param filepath: file to rewrite in place (only written when changed).
    """
    major, minor = target_series.split(".")
    if major == "0":
        # Pre-1.0 image tags carry the minor version too.
        image_series = f"{major}.{minor}"
    else:
        image_series = f"{major}"

    force_distro = None
    force_dev = False
    force_repo = None
    if major == "2":
        # The 2.x series only publishes buster dev images in the main repo.
        force_distro = "buster"
        force_dev = True
        force_repo = ""

    def replace_docker_image_reference(m: Match[str]) -> str:
        # Keep the matched distro/repo/dev parts unless forced above.
        distro = force_distro or m["distro"]
        repo = force_repo if force_repo is not None else m["repo"]
        dev = "-dev" if force_dev else m["dev"]
        return f"/baseplate-py:{image_series}-py{m['python']}-{distro}{repo or ''}{dev or ''}"

    file_content = filepath.read_text()
    # BUG FIX: the third positional argument of re.Pattern.sub() is
    # `count`, not `flags` — passing re.MULTILINE (== 8) silently limited
    # the number of replacements to 8 per file. Flags would belong in
    # re.compile() anyway, and this pattern needs none.
    changed = IMAGE_RE.sub(replace_docker_image_reference, file_content)
    if file_content == changed:
        return
    with filepath.open("w") as f:
        logger.info("Updated Docker image references in %s", filepath)
        f.write(changed)
def upgrade_docker_image_references(target_series: str, root: Path) -> None:
    """Upgrade image references in every Dockerfile* under root, then in
    .drone.yml if present."""
    targets = list(root.glob("**/Dockerfile*"))
    drone_config = root / ".drone.yml"
    if drone_config.exists():
        targets.append(drone_config)
    for path in targets:
        upgrade_docker_image_references_in_file(target_series, path)
| 30.327273
| 144
| 0.658873
| 227
| 1,668
| 4.612335
| 0.343612
| 0.073543
| 0.100287
| 0.106972
| 0.164279
| 0.131805
| 0.131805
| 0.131805
| 0
| 0
| 0
| 0.007502
| 0.200839
| 1,668
| 54
| 145
| 30.888889
| 0.777944
| 0
| 0
| 0
| 0
| 0.051282
| 0.193645
| 0.116906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.102564
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913fb3fc99b72d4e97ce88b0037ce6490e6db9c1
| 1,249
|
py
|
Python
|
model/swtz_ty.py
|
ArcherLuo233/election-s-prediction
|
9da72cb855f6d61f9cdec6e15f7ca832629ba51a
|
[
"MIT"
] | null | null | null |
model/swtz_ty.py
|
ArcherLuo233/election-s-prediction
|
9da72cb855f6d61f9cdec6e15f7ca832629ba51a
|
[
"MIT"
] | 1
|
2022-01-26T01:23:26.000Z
|
2022-01-26T01:23:34.000Z
|
model/swtz_ty.py
|
ArcherLuo233/election-s-prediction
|
9da72cb855f6d61f9cdec6e15f7ca832629ba51a
|
[
"MIT"
] | 1
|
2021-11-08T10:58:23.000Z
|
2021-11-08T10:58:23.000Z
|
from sqlalchemy import Column, ForeignKey, Integer, String, Text
from model.base import Base
class SWTZ_TY(Base):
    """Model for members of a business delegation (table ``swtz_ty``).

    NOTE(review): the label strings below are Chinese-language runtime
    values consumed by the export/UI layer and are kept verbatim.
    """
    __tablename__ = 'swtz_ty'
    # Display label for this model (runtime string).
    class_name = '商务团组-团员'
    # Column on this table linking back to the parent swtz record.
    foreign_key = 'swtz_id'
    # This model is not exported to .docx.
    export_docx = False
    # Fields requiring custom handling during import/export.
    export_handle_file = ['identity']
    # Ordered list of fields exposed for import/export.
    field = [
        'id', 'nickname', 'job', 'id_card', 'phone', 'remark', 'identity'
    ]
    # Combo-box configuration for the 'identity' field; items are
    # runtime option labels.
    combo_field = {
        'identity': {
            'exclude': False,
            'items': ['基层', '青年', '商界', '学界', '政界']
        }
    }
    # First data row in the import template spreadsheet.
    template_start_row = 3
    swtz_id = Column(Integer, ForeignKey('swtz.id'))
    nickname = Column(String(100), comment='姓名')
    job = Column(String(100), comment='单位职务')
    id_card = Column(String(100), comment='身份证号')
    phone = Column(String(100), comment='联系电话')
    remark = Column(Text, comment='备注')
    # Backing column for the 'identity' property below.
    identity_ = Column('identity', String(100), comment='身份')

    @property
    def identity(self):
        # Stored as a space-separated string; exposed as a list.
        if self.identity_ is None:
            return []
        return self.identity_.split(' ')

    @identity.setter
    def identity(self, val):
        # Accept a list (empties removed, joined with spaces) or a raw
        # string assigned as-is.
        if isinstance(val, list):
            while '' in val:
                val.remove('')
            self.identity_ = ' '.join(val)
        else:
            self.identity_ = val
| 26.020833
| 73
| 0.566853
| 140
| 1,249
| 4.892857
| 0.492857
| 0.065693
| 0.116788
| 0.128467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017917
| 0.285028
| 1,249
| 47
| 74
| 26.574468
| 0.74916
| 0
| 0
| 0
| 0
| 0
| 0.106571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
913fde7505a4c384507f28eb2cee97a556b8c075
| 3,515
|
py
|
Python
|
amy/dashboard/tests/test_autoupdate_profile.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 53
|
2015-01-10T17:39:19.000Z
|
2019-06-12T17:36:34.000Z
|
amy/dashboard/tests/test_autoupdate_profile.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 1,176
|
2015-01-02T06:32:47.000Z
|
2019-06-18T11:57:47.000Z
|
amy/dashboard/tests/test_autoupdate_profile.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 44
|
2015-01-03T15:08:56.000Z
|
2019-06-09T05:33:08.000Z
|
from django.urls import reverse
from consents.models import Consent, Term
from workshops.models import KnowledgeDomain, Person, Qualification
from workshops.tests.base import TestBase
class TestAutoUpdateProfile(TestBase):
    """Tests for the self-service profile auto-update view."""

    def setUp(self):
        # Shared fixtures: airports, lessons and languages from TestBase.
        self._setUpAirports()
        self._setUpLessons()
        self._setUpLanguages()
        self.user = Person.objects.create_user(
            username="user",
            personal="",
            family="",
            email="user@example.org",
            password="pass",
        )
        self.person_consent_required_terms(self.user)
        Qualification.objects.create(person=self.user, lesson=self.git)
        Qualification.objects.create(person=self.user, lesson=self.sql)
        self.physics = KnowledgeDomain.objects.create(name="physics")
        self.chemistry = KnowledgeDomain.objects.create(name="chemistry")
        self.user.domains.add(self.physics)
        self.user.languages.add(self.english)
        self.user.languages.add(self.french)
        self.client.login(username="user", password="pass")

    def test_load_form(self):
        # The form page renders for a logged-in user.
        rv = self.client.get(reverse("autoupdate_profile"))
        self.assertEqual(rv.status_code, 200)

    def test_update_profile(self):
        # Consent terms the form requires answers for.
        term_slugs = [
            "may-contact",
            "may-publish-name",
            "public-profile",
        ]
        terms_by_term_slug = {
            term.slug: term
            for term in Term.objects.filter(slug__in=term_slugs)
            .active()
            .prefetch_active_options()
        }
        # Pick the first active option of each term as the submitted answer.
        consent_data = {
            f"consents-{slug}": terms_by_term_slug[slug].active_options[0].pk
            for slug in term_slugs
        }
        data = {
            "personal": "admin",
            "middle": "",
            "family": "Smith",
            "email": "admin@example.org",
            "gender": Person.UNDISCLOSED,
            "airport": self.airport_0_0.pk,
            "github": "changed",
            "twitter": "",
            "url": "",
            "username": "changed",
            "affiliation": "",
            "languages": [self.latin.pk, self.french.pk],
            "domains": [self.chemistry.pk],
            "lessons": [self.git.pk, self.matlab.pk],
            "consents-person": self.user.pk,
            **consent_data,
        }
        rv = self.client.post(reverse("autoupdate_profile"), data, follow=True)
        self.assertEqual(rv.status_code, 200)
        content = rv.content.decode("utf-8")
        self.assertNotIn("Fix errors below", content)
        self.user.refresh_from_db()
        # Read-only fields must not be changed by the submitted values.
        self.assertEqual(self.user.username, "user")  # username is read-only
        self.assertEqual(self.user.github, None)  # github is read-only
        self.assertEqual(self.user.family, "Smith")
        self.assertEqual(set(self.user.lessons.all()), {self.git, self.matlab})
        self.assertEqual(list(self.user.domains.all()), [self.chemistry])
        self.assertEqual(set(self.user.languages.all()), {self.french, self.latin})
        # Active consents should now reflect the submitted options.
        updated_consents_by_term_slug = {
            consent.term.slug: consent
            for consent in Consent.objects.filter(
                term__slug__in=term_slugs, person=self.user
            )
            .active()
            .select_related("term")
        }
        for slug in term_slugs:
            self.assertEqual(
                updated_consents_by_term_slug[slug].term_option.pk,
                consent_data[f"consents-{slug}"],
            )
| 34.80198
| 83
| 0.586629
| 377
| 3,515
| 5.331565
| 0.310345
| 0.063682
| 0.027861
| 0.029851
| 0.228856
| 0.112438
| 0.082587
| 0.049751
| 0
| 0
| 0
| 0.003992
| 0.28734
| 3,515
| 100
| 84
| 35.15
| 0.798403
| 0.011664
| 0
| 0.046512
| 0
| 0
| 0.098243
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 1
| 0.034884
| false
| 0.023256
| 0.046512
| 0
| 0.093023
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9140f295d54089cb5cee0de94bb54febfe097979
| 4,823
|
py
|
Python
|
bot/recognizer_bot/yolo/common/utils.py
|
kprokofi/animal-recognition-with-voice
|
e9e5235315255eb6e17df3dba616b2ed4c902c92
|
[
"MIT"
] | 1
|
2021-03-18T05:51:10.000Z
|
2021-03-18T05:51:10.000Z
|
bot/recognizer_bot/yolo/common/utils.py
|
kprokofi/animal-recognition-with-voice
|
e9e5235315255eb6e17df3dba616b2ed4c902c92
|
[
"MIT"
] | 3
|
2021-04-11T20:52:44.000Z
|
2021-06-13T13:46:08.000Z
|
bot/recognizer_bot/yolo/common/utils.py
|
kprokofi/animal-recognition-with-voice
|
e9e5235315255eb6e17df3dba616b2ed4c902c92
|
[
"MIT"
] | null | null | null |
import numpy as np
import time
import cv2
import colorsys
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, ReLU, Multiply
# Custom objects from backbones package https://github.com/david8862/keras-YOLOv3-model-set/tree/master/common/backbones
def mish(x):
    """Mish activation: ``x * tanh(softplus(x))``."""
    soft = K.softplus(x)
    return x * K.tanh(soft)
def hard_swish(x):
    """Hard-swish activation: ``x * hard_sigmoid(x)`` built from keras layers."""
    gate = Activation(hard_sigmoid)(x)
    return Multiply()([gate, x])
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: ``relu6(x + 3) / 6``."""
    shifted = x + 3.
    return ReLU(6.)(shifted) * (1. / 6.)
def swish(x):
    """Swish activation function: ``x * sigmoid(x)``.

    Prefers the native TF op when running on the tensorflow backend,
    since its gradient implementation is more memory-efficient.

    # References
        [Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
    """
    if K.backend() != 'tensorflow':
        return x * K.sigmoid(x)
    try:
        # Native TF implementation has a more memory-efficient gradient.
        return K.tf.nn.swish(x)
    except AttributeError:
        # Older TF without tf.nn.swish: fall back to the backend ops.
        return x * K.sigmoid(x)
def get_custom_objects():
    """Build the ``custom_objects`` dict so customized layers/functions are
    resolved correctly when a keras .h5 model is loaded or converted."""
    return {
        'tf': tf,
        'swish': swish,
        'hard_sigmoid': hard_sigmoid,
        'hard_swish': hard_swish,
        'mish': mish,
    }
def get_multiscale_list():
    """Return the square input shapes used for multi-scale training:
    (320, 320) through (608, 608) in steps of 32."""
    return [(side, side) for side in range(320, 640, 32)]
def resize_anchors(base_anchors, target_shape, base_shape=(416, 416)):
    """Rescale anchor boxes to a new input shape.

    The original anchors are clustered from COCO under input shape
    (416, 416); scaling them to the training input shape gives better
    performance. Shapes are (height, width); anchors are (w, h), hence
    the ``[::-1]`` reversal.
    """
    scaled = base_anchors * target_shape[::-1] / base_shape[::-1]
    return np.around(scaled)
def get_classes(classes_path):
    """Load class names from a text file, one name per line, whitespace-stripped."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Load anchors from a file whose first line is comma-separated numbers;
    returns them as an (N, 2) float array."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def get_colors(class_names):
    """Generate one distinct RGB tuple (0-255 ints) per class for box drawing."""
    count = len(class_names)
    # Evenly spaced hues at full saturation/value, converted to 8-bit RGB.
    colors = [
        tuple(int(channel * 255) for channel in colorsys.hsv_to_rgb(i / count, 1., 1.))
        for i in range(count)
    ]
    np.random.seed(10101)  # fixed seed for consistent colors across runs
    np.random.shuffle(colors)  # decorrelate adjacent class colors
    np.random.seed(None)  # reset seed to default
    return colors
def get_dataset(annotation_file, shuffle=True):
    """Read annotation lines from a file, optionally shuffled.

    Shuffling is seeded from the current time, so order differs per call.
    """
    with open(annotation_file) as handle:
        lines = [line.strip() for line in handle]
    if shuffle:
        np.random.seed(int(time.time()))
        np.random.shuffle(lines)
    return lines
def draw_label(image, text, color, coords):
    """Draw a filled label box with white text anchored above (x, y); returns image."""
    font = cv2.FONT_HERSHEY_PLAIN
    font_scale = 1.
    padding = 5
    (text_width, text_height) = cv2.getTextSize(
        text, font, fontScale=font_scale, thickness=1)[0]
    x, y = coords
    box_width = text_width + padding * 2
    box_height = text_height + padding * 2
    # Filled background rectangle extends upward from the anchor point.
    cv2.rectangle(image, (x, y), (x + box_width, y - box_height), color, cv2.FILLED)
    cv2.putText(image, text, (x + padding, y - text_height + padding), font,
                fontScale=font_scale,
                color=(255, 255, 255),
                lineType=cv2.LINE_AA)
    return image
def draw_boxes(image, boxes, classes, scores, class_names, colors, show_score=True):
    """Draw detection rectangles plus class labels on the image; returns image."""
    # Explicit length checks (boxes/classes may be numpy arrays, where bare
    # truthiness is ambiguous).
    if boxes is None or len(boxes) == 0:
        return image
    if classes is None or len(classes) == 0:
        return image
    for box, cls, score in zip(boxes, classes, scores):
        xmin, ymin, xmax, ymax = [int(coord) for coord in box]
        class_name = class_names[cls]
        if show_score:
            label = '{} {:.2f}'.format(class_name, score)
        else:
            label = '{}'.format(class_name)
        # Fall back to black when no color table is supplied.
        color = (0, 0, 0) if colors is None else colors[cls]
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 1, cv2.LINE_AA)
        image = draw_label(image, label, color, (xmin, ymin))
    return image
| 28.708333
| 120
| 0.618495
| 659
| 4,823
| 4.414264
| 0.330804
| 0.030938
| 0.016501
| 0.016501
| 0.01375
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041526
| 0.261041
| 4,823
| 167
| 121
| 28.88024
| 0.774691
| 0.207755
| 0
| 0.060606
| 0
| 0
| 0.014781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131313
| false
| 0.010101
| 0.070707
| 0.030303
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91469ce6ec9fde95e8590b13e1386757a2494a57
| 1,374
|
py
|
Python
|
sow_generator/tasks.py
|
praekelt/sow-generator
|
eb5dab3b3231688966254a1797ced7eec67b6e8a
|
[
"BSD-3-Clause"
] | 1
|
2016-04-14T08:34:48.000Z
|
2016-04-14T08:34:48.000Z
|
sow_generator/tasks.py
|
praekelt/sow-generator
|
eb5dab3b3231688966254a1797ced7eec67b6e8a
|
[
"BSD-3-Clause"
] | null | null | null |
sow_generator/tasks.py
|
praekelt/sow-generator
|
eb5dab3b3231688966254a1797ced7eec67b6e8a
|
[
"BSD-3-Clause"
] | null | null | null |
from github3 import login
from github3.models import GitHubError
from celery import task
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from sow_generator.models import Repository, AuthToken
def _sync_repository(obj):
    """Fetch README/SOW content for a repository from GitHub and save changes.

    For each field, looks for ``<FIELD>.rst`` then ``<FIELD>.md`` in the repo
    root; Markdown is fetched last, so it takes precedence when both exist.
    Saves the object only if something actually changed.

    :param obj: Repository model instance (must expose ``orgname``).
    """
    dirty = False
    # NOTE: token row is assumed to live at pk 1.
    token = AuthToken.objects.get(id=1).token
    gh = login(token=token)
    # (removed an unused `gh.user()` call that cost an extra API request)
    org, name = obj.orgname
    repo = gh.repository(org, name)
    if repo is not None:
        # Find RST or MD files. Markdown is applied last, so it wins.
        for fieldname in ("readme", "sow"):
            for fmt in ("rst", "md"):
                v = repo.contents("%s.%s" % (fieldname.upper(), fmt))
                if v is not None:
                    setattr(obj, fieldname, v.decoded)
                    setattr(obj, "%s_format" % fieldname, fmt)
                    dirty = True
    if dirty:
        obj.save()
@task(max_retries=5)
def sync_repository(id):
    """Celery task: sync a single repository identified by primary key."""
    repository = Repository.objects.get(id=id)
    _sync_repository(repository)
@periodic_task(run_every=crontab(hour='*', minute='0', day_of_week='*'))
def sync_repositories():
    """Periodic task (hourly, on the hour): sync every repository."""
    for repository in Repository.objects.all():
        _sync_repository(repository)
| 29.869565
| 72
| 0.621543
| 176
| 1,374
| 4.755682
| 0.403409
| 0.066906
| 0.060932
| 0.033453
| 0.188769
| 0.188769
| 0.188769
| 0.188769
| 0.188769
| 0.188769
| 0
| 0.004946
| 0.264192
| 1,374
| 45
| 73
| 30.533333
| 0.822948
| 0.051674
| 0
| 0.228571
| 0
| 0
| 0.035466
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.171429
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91472db15a8c58afa56167fc11db5c1a1643924e
| 10,956
|
py
|
Python
|
multiworld/multiworld/core/image_env.py
|
yufeiwang63/ROLL
|
aba0b4530934946eb9c41fbe5a0d6c27775596ff
|
[
"MIT"
] | 11
|
2020-11-04T03:15:27.000Z
|
2021-11-25T16:00:41.000Z
|
multiworld/multiworld/core/image_env.py
|
yufeiwang63/ROLL
|
aba0b4530934946eb9c41fbe5a0d6c27775596ff
|
[
"MIT"
] | null | null | null |
multiworld/multiworld/core/image_env.py
|
yufeiwang63/ROLL
|
aba0b4530934946eb9c41fbe5a0d6c27775596ff
|
[
"MIT"
] | 3
|
2020-11-19T14:16:56.000Z
|
2021-11-25T16:01:13.000Z
|
import copy
import random
import warnings

import cv2
import numpy as np
from PIL import Image
from gym.spaces import Box, Dict

from multiworld.core.multitask_env import MultitaskEnv
from multiworld.core.wrapper_env import ProxyEnv
from multiworld.envs.env_util import concatenate_box_spaces
from multiworld.envs.env_util import get_stat_in_paths, create_stats_ordered_dict
class ImageEnv(ProxyEnv, MultitaskEnv):
    """Wrapper env that turns the wrapped env's state observations into
    flattened image observations and image goals."""

    def __init__(
            self,
            wrapped_env,
            imsize=84,
            init_camera=None,
            transpose=False,
            grayscale=False,
            normalize=False,
            reward_type='wrapped_env',
            threshold=10,
            image_length=None,
            presampled_goals=None,
            non_presampled_goal_img_is_garbage=False,
            recompute_reward=True,
    ):
        """
        :param wrapped_env:
        :param imsize:
        :param init_camera:
        :param transpose:
        :param grayscale:
        :param normalize:
        :param reward_type:
        :param threshold:
        :param image_length:
        :param presampled_goals:
        :param non_presampled_goal_img_is_garbage: Set this option to True if
        you want to allow the code to work without presampled goals,
        but where the underlying env doesn't support set_to_goal. As the name,
        implies this will make it so that the goal image is garbage if you
        don't provide pre-sampled goals. The main use case is if you want to
        use an ImageEnv to pre-sample a bunch of goals.
        """
        self.quick_init(locals())
        super().__init__(wrapped_env)
        self.wrapped_env.hide_goal_markers = True
        self.imsize = imsize
        self.init_camera = init_camera
        self.transpose = transpose
        self.grayscale = grayscale
        self.normalize = normalize
        self.recompute_reward = recompute_reward
        self.non_presampled_goal_img_is_garbage = non_presampled_goal_img_is_garbage
        # Flattened image length: H*W for grayscale, 3*H*W for RGB, unless
        # explicitly overridden by the caller.
        if image_length is not None:
            self.image_length = image_length
        else:
            if grayscale:
                self.image_length = self.imsize * self.imsize
            else:
                self.image_length = 3 * self.imsize * self.imsize
        self.channels = 1 if grayscale else 3
        # This is torch format rather than PIL image
        self.image_shape = (self.imsize, self.imsize)
        # Flattened past image queue
        # init camera
        if init_camera is not None:
            sim = self._wrapped_env.initialize_camera(init_camera)
            # viewer = mujoco_py.MjRenderContextOffscreen(sim, device_id=-1)
            # init_camera(viewer.cam)
            # sim.add_render_context(viewer)
        img_space = Box(0, 1, (self.image_length,), dtype=np.float32)
        self._img_goal = img_space.sample() #has to be done for presampling
        # Mirror the wrapped env's observation spaces, replacing each entry
        # with the flattened image space.
        spaces = self.wrapped_env.observation_space.spaces.copy()
        spaces['observation'] = img_space
        spaces['desired_goal'] = img_space
        spaces['achieved_goal'] = img_space
        spaces['image_observation'] = img_space
        spaces['image_desired_goal'] = img_space
        spaces['image_achieved_goal'] = img_space
        self.return_image_proprio = False
        if 'proprio_observation' in spaces.keys():
            # Also expose concatenated image+proprioception spaces.
            self.return_image_proprio = True
            spaces['image_proprio_observation'] = concatenate_box_spaces(
                spaces['image_observation'],
                spaces['proprio_observation']
            )
            spaces['image_proprio_desired_goal'] = concatenate_box_spaces(
                spaces['image_desired_goal'],
                spaces['proprio_desired_goal']
            )
            spaces['image_proprio_achieved_goal'] = concatenate_box_spaces(
                spaces['image_achieved_goal'],
                spaces['proprio_achieved_goal']
            )
        self.observation_space = Dict(spaces)
        self.action_space = self.wrapped_env.action_space
        self.reward_type = reward_type
        self.threshold = threshold
        self._presampled_goals = presampled_goals
        if self._presampled_goals is None:
            self.num_goals_presampled = 0
        else:
            # Infer the number of presampled goals from any value's first axis.
            self.num_goals_presampled = presampled_goals[random.choice(list(presampled_goals))].shape[0]
        self._last_image = None

    def step(self, action):
        """Step the wrapped env, augment obs with images, optionally
        recompute the reward from the image goal."""
        obs, reward, done, info = self.wrapped_env.step(action)
        new_obs = self._update_obs(obs)
        if self.recompute_reward:
            reward = self.compute_reward(action, new_obs)
        self._update_info(info, obs)
        return new_obs, reward, done, info

    def _update_info(self, info, obs):
        """Record image-space distance/success diagnostics into ``info``."""
        achieved_goal = obs['image_achieved_goal']
        desired_goal = self._img_goal
        image_dist = np.linalg.norm(achieved_goal-desired_goal)
        # success is 0 when within threshold, -1 otherwise
        image_success = (image_dist<self.threshold).astype(float)-1
        info['image_dist'] = image_dist
        info['image_success'] = image_success

    def reset(self):
        """Reset the wrapped env and refresh the image goal."""
        obs = self.wrapped_env.reset()
        if self.num_goals_presampled > 0:
            goal = self.sample_goal()
            self._img_goal = goal['image_desired_goal']
            self.wrapped_env.set_goal(goal)
            for key in goal:
                obs[key] = goal[key]
        elif self.non_presampled_goal_img_is_garbage:
            # This is use mainly for debugging or pre-sampling goals.
            self._img_goal = self._get_flat_img()
        else:
            # Temporarily move the env to its goal state to render the goal
            # image, then restore the previous state.
            env_state = self.wrapped_env.get_env_state()
            self.wrapped_env.set_to_goal(self.wrapped_env.get_goal())
            self._img_goal = self._get_flat_img()
            self.wrapped_env.set_env_state(env_state)
        return self._update_obs(obs)

    def _get_obs(self):
        """Return the wrapped env's observation augmented with images."""
        return self._update_obs(self.wrapped_env._get_obs())

    def _update_obs(self, obs):
        """Insert the current flat image and image goal into ``obs``."""
        img_obs = self._get_flat_img()
        obs['image_observation'] = img_obs
        obs['image_desired_goal'] = self._img_goal
        obs['image_achieved_goal'] = img_obs
        obs['observation'] = img_obs
        obs['desired_goal'] = self._img_goal
        obs['achieved_goal'] = img_obs
        if self.return_image_proprio:
            obs['image_proprio_observation'] = np.concatenate(
                (obs['image_observation'], obs['proprio_observation'])
            )
            obs['image_proprio_desired_goal'] = np.concatenate(
                (obs['image_desired_goal'], obs['proprio_desired_goal'])
            )
            obs['image_proprio_achieved_goal'] = np.concatenate(
                (obs['image_achieved_goal'], obs['proprio_achieved_goal'])
            )
        return obs

    def _get_flat_img(self):
        """Render the wrapped env and return a flattened (1-D) image array,
        applying the configured grayscale/normalize/transpose steps."""
        image_obs = self._wrapped_env.get_image(
            width=self.imsize,
            height=self.imsize,
        )
        self._last_image = image_obs
        if self.grayscale:
            image_obs = Image.fromarray(image_obs).convert('L')
            image_obs = np.array(image_obs)
        if self.normalize:
            image_obs = image_obs / 255.0
        if self.transpose:
            image_obs = image_obs.transpose()
            # After transpose, the channel axis leads.
            assert image_obs.shape[0] == self.channels
        return image_obs.flatten()

    def render(self, mode='wrapped'):
        """Render either through the wrapped env or an OpenCV window."""
        if mode == 'wrapped':
            self.wrapped_env.render()
        elif mode == 'cv2':
            if self._last_image is None:
                self._last_image = self._wrapped_env.get_image(
                    width=self.imsize,
                    height=self.imsize,
                )
            cv2.imshow('ImageEnv', self._last_image)
            cv2.waitKey(1)
        else:
            raise ValueError("Invalid render mode: {}".format(mode))

    def show_obs(self, normalized_img_vec_, name='img'):
        """Debug helper: display a normalized flat image vector via OpenCV."""
        print(name)
        normalized_img_vec = copy.deepcopy(normalized_img_vec_)
        img = (normalized_img_vec * 255).astype(np.uint8)
        img = img.reshape(3, self.imsize, self.imsize).transpose()
        img = img[::-1, :, ::-1]
        cv2.imshow(name, img)
        cv2.waitKey()

    """
    Multitask functions
    """
    def get_goal(self):
        """Return the wrapped env's goal with the image goal entries added."""
        goal = self.wrapped_env.get_goal()
        goal['desired_goal'] = self._img_goal
        goal['image_desired_goal'] = self._img_goal
        return goal

    def set_goal(self, goal):
        ''' Assume goal contains both image_desired_goal and any goals required for wrapped envs'''
        self._img_goal = goal['image_desired_goal']
        self.wrapped_env.set_goal(goal)

    def sample_goals(self, batch_size):
        """Sample image goals, preferring presampled goals when available;
        otherwise render each wrapped-env goal state (slow)."""
        if self.num_goals_presampled > 0:
            idx = np.random.randint(0, self.num_goals_presampled, batch_size)
            sampled_goals = {
                k: v[idx] for k, v in self._presampled_goals.items()
            }
            return sampled_goals
        if batch_size > 1:
            warnings.warn("Sampling goal images is slow")
        img_goals = np.zeros((batch_size, self.image_length))
        goals = self.wrapped_env.sample_goals(batch_size)
        pre_state = self.wrapped_env.get_env_state()
        for i in range(batch_size):
            goal = self.unbatchify_dict(goals, i)
            self.wrapped_env.set_to_goal(goal)
            img_goals[i, :] = self._get_flat_img()
        self.wrapped_env.set_env_state(pre_state)
        goals['desired_goal'] = img_goals
        goals['image_desired_goal'] = img_goals
        return goals

    def compute_rewards(self, actions, obs):
        """Compute batch rewards by image distance, sparse threshold, or by
        delegating to the wrapped env, per ``self.reward_type``."""
        achieved_goals = obs['achieved_goal']
        desired_goals = obs['desired_goal']
        dist = np.linalg.norm(achieved_goals - desired_goals, axis=1)
        if self.reward_type=='image_distance':
            return -dist
        elif self.reward_type=='image_sparse':
            return -(dist > self.threshold).astype(float)
        elif self.reward_type=='wrapped_env':
            return self.wrapped_env.compute_rewards(actions, obs)
        else:
            raise NotImplementedError()

    def get_diagnostics(self, paths, **kwargs):
        """Aggregate image_dist/image_success statistics (per-step and final)
        on top of the wrapped env's diagnostics."""
        statistics = self.wrapped_env.get_diagnostics(paths, **kwargs)
        for stat_name_in_paths in ["image_dist", "image_success"]:
            stats = get_stat_in_paths(paths, 'env_infos', stat_name_in_paths)
            statistics.update(create_stats_ordered_dict(
                stat_name_in_paths,
                stats,
                always_show_all_stats=True,
            ))
            final_stats = [s[-1] for s in stats]
            statistics.update(create_stats_ordered_dict(
                "Final " + stat_name_in_paths,
                final_stats,
                always_show_all_stats=True,
            ))
        return statistics
def normalize_image(image, dtype=np.float64):
    """Convert a uint8 image to floats in [0, 1] using the given dtype."""
    assert image.dtype == np.uint8
    converted = dtype(image)
    return converted / 255.0
def unormalize_image(image):
    """Convert a float image in [0, 1] back to uint8 in [0, 255]."""
    assert image.dtype != np.uint8
    scaled = image * 255.0
    return np.uint8(scaled)
| 38.174216
| 104
| 0.621486
| 1,336
| 10,956
| 4.786677
| 0.170659
| 0.043784
| 0.052541
| 0.021267
| 0.247694
| 0.179672
| 0.078342
| 0.05301
| 0.05301
| 0.046286
| 0
| 0.006805
| 0.289157
| 10,956
| 286
| 105
| 38.307692
| 0.81433
| 0.08671
| 0
| 0.104348
| 0
| 0
| 0.092938
| 0.020177
| 0
| 0
| 0
| 0
| 0.013043
| 1
| 0.069565
| false
| 0
| 0.043478
| 0.004348
| 0.178261
| 0.004348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91492cd2d90ac485784d8d45eca57302464591f8
| 21,084
|
py
|
Python
|
daemon/core/coreobj.py
|
shanv82/core
|
70abb8cc1426ffceb53a03e84edc26f56f9ed4c0
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/coreobj.py
|
shanv82/core
|
70abb8cc1426ffceb53a03e84edc26f56f9ed4c0
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/coreobj.py
|
shanv82/core
|
70abb8cc1426ffceb53a03e84edc26f56f9ed4c0
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode,
PyCoreNet, and PyCoreNetIf.
"""
import os
import shutil
import socket
import threading
from socket import AF_INET
from socket import AF_INET6
from core.data import NodeData, LinkData
from core.enumerations import LinkTypes
from core.misc import ipaddress
class Position(object):
    """
    Helper class for Cartesian coordinate position
    """

    def __init__(self, x=None, y=None, z=None):
        """
        Creates a Position instance.

        :param x: x position
        :param y: y position
        :param z: z position
        :return:
        """
        self.x = x
        self.y = y
        self.z = z

    def set(self, x=None, y=None, z=None):
        """
        Update the position; report whether anything actually moved.

        :param float x: x position
        :param float y: y position
        :param float z: z position
        :return: True if position changed, False otherwise
        :rtype: bool
        """
        # unchanged coordinates: nothing to do
        if self.x == x and self.y == y and self.z == z:
            return False
        self.x, self.y, self.z = x, y, z
        return True

    def get(self):
        """
        Retrieve x,y,z position.

        :return: x,y,z position tuple
        :rtype: tuple
        """
        return (self.x, self.y, self.z)
class PyCoreObj(object):
    """
    Base class for CORE objects (nodes and networks)
    """
    # API node type; None means this object emits no node data
    apitype = None

    # TODO: appears start has no usage, verify and remove
    def __init__(self, session, objid=None, name=None, start=True):
        """
        Creates a PyCoreObj instance.

        :param core.session.Session session: CORE session object
        :param int objid: object id
        :param str name: object name
        :param bool start: start value
        :return:
        """
        self.session = session
        if objid is None:
            objid = session.get_object_id()
        self.objid = objid
        if name is None:
            name = "o%s" % self.objid
        self.name = name
        self.type = None
        self.server = None
        self.services = None
        # ifindex is key, PyCoreNetIf instance is value
        self._netif = {}
        self.ifindex = 0
        self.canvas = None
        self.icon = None
        self.opaque = None
        self.position = Position()

    def startup(self):
        """
        Each object implements its own startup method.

        :return: nothing
        """
        raise NotImplementedError

    def shutdown(self):
        """
        Each object implements its own shutdown method.

        :return: nothing
        """
        raise NotImplementedError

    def setposition(self, x=None, y=None, z=None):
        """
        Set the (x,y,z) position of the object.

        :param float x: x position
        :param float y: y position
        :param float z: z position
        :return: True if position changed, False otherwise
        :rtype: bool
        """
        return self.position.set(x=x, y=y, z=z)

    def getposition(self):
        """
        Return an (x,y,z) tuple representing this object's position.

        :return: x,y,z position tuple
        :rtype: tuple
        """
        return self.position.get()

    def ifname(self, ifindex):
        """
        Retrieve interface name for index.

        :param int ifindex: interface index
        :return: interface name
        :rtype: str
        """
        return self._netif[ifindex].name

    def netifs(self, sort=False):
        """
        Retrieve network interfaces, sorted if desired.

        :param bool sort: boolean used to determine if interfaces should be sorted
        :return: network interfaces
        :rtype: list
        """
        if sort:
            return map(lambda k: self._netif[k], sorted(self._netif.keys()))
        else:
            # NOTE: dict.itervalues() is Python 2 only; this module predates
            # the project's Python 3 port.
            return self._netif.itervalues()

    def numnetif(self):
        """
        Return the attached interface count.

        :return: number of network interfaces
        :rtype: int
        """
        return len(self._netif)

    def getifindex(self, netif):
        """
        Retrieve index for an interface.

        :param PyCoreNetIf netif: interface to get index for
        :return: interface index if found, -1 otherwise
        :rtype: int
        """
        # identity comparison: looking for this exact interface object
        for ifindex in self._netif:
            if self._netif[ifindex] is netif:
                return ifindex
        return -1

    def newifindex(self):
        """
        Create a new interface index.

        :return: interface index
        :rtype: int
        """
        # skip over any index already in use
        while self.ifindex in self._netif:
            self.ifindex += 1
        ifindex = self.ifindex
        self.ifindex += 1
        return ifindex

    def data(self, message_type, lat=None, lon=None, alt=None):
        """
        Build a data object for this node.

        :param message_type: purpose for the data object we are creating
        :param str lat: latitude
        :param str lon: longitude
        :param str alt: altitude
        :return: node data object, or None when this object type emits none
        :rtype: core.data.NodeData
        """
        if self.apitype is None:
            return None
        x, y, _ = self.getposition()
        model = self.type
        emulation_server = self.server
        services = self.services
        if services is not None:
            # encode service names as a single pipe-delimited string
            services = "|".join([service.name for service in services])
        node_data = NodeData(
            message_type=message_type,
            id=self.objid,
            node_type=self.apitype,
            name=self.name,
            emulation_id=self.objid,
            canvas=self.canvas,
            icon=self.icon,
            opaque=self.opaque,
            x_position=x,
            y_position=y,
            latitude=lat,
            longitude=lon,
            altitude=alt,
            model=model,
            emulation_server=emulation_server,
            services=services
        )
        return node_data

    def all_link_data(self, flags):
        """
        Build CORE Link data for this object. There is no default
        method for PyCoreObjs as PyCoreNodes do not implement this but
        PyCoreNets do.

        :param flags: message flags
        :return: list of link data
        :rtype: core.data.LinkData
        """
        return []
class PyCoreNode(PyCoreObj):
    """
    Base class for CORE nodes.
    """

    def __init__(self, session, objid=None, name=None, start=True):
        """
        Create a PyCoreNode instance.

        :param core.session.Session session: CORE session object
        :param int objid: object id
        :param str name: object name
        :param bool start: boolean for starting
        """
        super(PyCoreNode, self).__init__(session, objid, name, start=start)
        self.services = []
        self.nodedir = None
        # True when this node owns (and may delete) its node directory
        self.tmpnodedir = False

    def addservice(self, service):
        """
        Add a services to the service list.

        :param core.service.CoreService service: service to add
        :return: nothing
        """
        if service is not None:
            self.services.append(service)

    def makenodedir(self):
        """
        Create the node directory.

        :return: nothing
        """
        if self.nodedir is None:
            self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
            os.makedirs(self.nodedir)
            self.tmpnodedir = True
        else:
            # caller supplied the directory; do not remove it later
            self.tmpnodedir = False

    def rmnodedir(self):
        """
        Remove the node directory, unless preserve directory has been set.

        :return: nothing
        """
        preserve = self.session.options.get_config("preservedir") == "1"
        if preserve:
            return
        # only remove directories this node created itself
        if self.tmpnodedir:
            shutil.rmtree(self.nodedir, ignore_errors=True)

    def addnetif(self, netif, ifindex):
        """
        Add network interface to node and set the network interface index if successful.

        :param PyCoreNetIf netif: network interface to add
        :param int ifindex: interface index
        :return: nothing
        :raises ValueError: when the interface index is already in use
        """
        if ifindex in self._netif:
            raise ValueError("ifindex %s already exists" % ifindex)
        self._netif[ifindex] = netif
        # TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
        netif.netindex = ifindex

    def delnetif(self, ifindex):
        """
        Delete a network interface

        :param int ifindex: interface index to delete
        :return: nothing
        :raises ValueError: when no interface exists at the given index
        """
        if ifindex not in self._netif:
            raise ValueError("ifindex %s does not exist" % ifindex)
        netif = self._netif.pop(ifindex)
        # shut the interface down before dropping the last reference
        netif.shutdown()
        del netif

    # TODO: net parameter is not used, remove
    def netif(self, ifindex, net=None):
        """
        Retrieve network interface.

        :param int ifindex: index of interface to retrieve
        :param PyCoreNetIf net: network node
        :return: network interface, or None if not found
        :rtype: PyCoreNetIf
        """
        if ifindex in self._netif:
            return self._netif[ifindex]
        else:
            return None

    def attachnet(self, ifindex, net):
        """
        Attach a network.

        :param int ifindex: interface of index to attach
        :param PyCoreNetIf net: network to attach
        :return:
        :raises ValueError: when no interface exists at the given index
        """
        if ifindex not in self._netif:
            raise ValueError("ifindex %s does not exist" % ifindex)
        self._netif[ifindex].attachnet(net)

    def detachnet(self, ifindex):
        """
        Detach network interface.

        :param int ifindex: interface index to detach
        :return: nothing
        :raises ValueError: when no interface exists at the given index
        """
        if ifindex not in self._netif:
            raise ValueError("ifindex %s does not exist" % ifindex)
        self._netif[ifindex].detachnet()

    def setposition(self, x=None, y=None, z=None):
        """
        Set position.

        :param x: x position
        :param y: y position
        :param z: z position
        :return: nothing
        """
        changed = super(PyCoreNode, self).setposition(x, y, z)
        if changed:
            # propagate the move to every attached interface's position hook
            for netif in self.netifs(sort=True):
                netif.setposition(x, y, z)

    def commonnets(self, obj, want_ctrl=False):
        """
        Given another node or net object, return common networks between
        this node and that object. A list of tuples is returned, with each tuple
        consisting of (network, interface1, interface2).

        :param obj: object to get common network with
        :param want_ctrl: flag set to determine if control network are wanted
        :return: tuples of common networks
        :rtype: list
        """
        common = []
        for netif1 in self.netifs():
            # skip control-network interfaces unless explicitly requested
            if not want_ctrl and hasattr(netif1, "control"):
                continue
            for netif2 in obj.netifs():
                if netif1.net == netif2.net:
                    common.append((netif1.net, netif1, netif2))
        return common

    def check_cmd(self, args):
        """
        Runs shell command on node.

        :param list[str]|str args: command to run
        :return: combined stdout and stderr
        :rtype: str
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        raise NotImplementedError

    def cmd(self, args, wait=True):
        """
        Runs shell command on node, with option to not wait for a result.

        :param list[str]|str args: command to run
        :param bool wait: wait for command to exit, defaults to True
        :return: exit status for command
        :rtype: int
        """
        raise NotImplementedError

    def cmd_output(self, args):
        """
        Runs shell command on node and get exit status and output.

        :param list[str]|str args: command to run
        :return: exit status and combined stdout and stderr
        :rtype: tuple[int, str]
        """
        raise NotImplementedError

    def termcmdstring(self, sh):
        """
        Create a terminal command string.

        :param str sh: shell to execute command in
        :return: str
        """
        raise NotImplementedError
class PyCoreNet(PyCoreObj):
    """
    Base class for networks
    """
    # default link type for networks; subclasses may override
    linktype = LinkTypes.WIRED.value

    def __init__(self, session, objid, name, start=True):
        """
        Create a PyCoreNet instance.

        :param core.session.Session session: CORE session object
        :param int objid: object id
        :param str name: object name
        :param bool start: should object start
        """
        super(PyCoreNet, self).__init__(session, objid, name, start=start)
        # maps attached PyCoreNetIf -> per-interface link state
        self._linked = {}
        self._linked_lock = threading.Lock()

    def startup(self):
        """
        Each object implements its own startup method.

        :return: nothing
        """
        raise NotImplementedError

    def shutdown(self):
        """
        Each object implements its own shutdown method.

        :return: nothing
        """
        raise NotImplementedError

    def attach(self, netif):
        """
        Attach network interface.

        :param PyCoreNetIf netif: network interface to attach
        :return: nothing
        """
        i = self.newifindex()
        self._netif[i] = netif
        # record the interface's index on this network
        netif.netifi = i
        with self._linked_lock:
            self._linked[netif] = {}

    def detach(self, netif):
        """
        Detach network interface.

        :param PyCoreNetIf netif: network interface to detach
        :return: nothing
        """
        del self._netif[netif.netifi]
        netif.netifi = None
        with self._linked_lock:
            del self._linked[netif]

    def all_link_data(self, flags):
        """
        Build link data objects for this network. Each link object describes a link
        between this network and a node.

        :param flags: message flags for the link data
        :return: list of core.data.LinkData for each connected interface
        :rtype: list
        """
        all_links = []

        # build a link message from this network node to each node having a
        # connected interface
        for netif in self.netifs(sort=True):
            if not hasattr(netif, "node"):
                continue
            otherobj = netif.node
            uni = False
            if otherobj is None:
                # two layer-2 switches/hubs linked together via linknet()
                if not hasattr(netif, "othernet"):
                    continue
                otherobj = netif.othernet
                # avoid emitting the same switch-to-switch link twice
                if otherobj.objid == self.objid:
                    continue
                # compare upstream vs downstream params to detect asymmetry
                netif.swapparams('_params_up')
                upstream_params = netif.getparams()
                netif.swapparams('_params_up')
                if netif.getparams() != upstream_params:
                    uni = True

            unidirectional = 0
            if uni:
                unidirectional = 1

            # collect the peer interface's IPv4/IPv6 addresses
            interface2_ip4 = None
            interface2_ip4_mask = None
            interface2_ip6 = None
            interface2_ip6_mask = None
            for address in netif.addrlist:
                ip, _sep, mask = address.partition("/")
                mask = int(mask)
                if ipaddress.is_ipv4_address(ip):
                    family = AF_INET
                    ipl = socket.inet_pton(family, ip)
                    interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
                    interface2_ip4_mask = mask
                else:
                    family = AF_INET6
                    ipl = socket.inet_pton(family, ip)
                    interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
                    interface2_ip6_mask = mask

            link_data = LinkData(
                message_type=flags,
                node1_id=self.objid,
                node2_id=otherobj.objid,
                link_type=self.linktype,
                unidirectional=unidirectional,
                interface2_id=otherobj.getifindex(netif),
                interface2_mac=netif.hwaddr,
                interface2_ip4=interface2_ip4,
                interface2_ip4_mask=interface2_ip4_mask,
                interface2_ip6=interface2_ip6,
                interface2_ip6_mask=interface2_ip6_mask,
                delay=netif.getparam("delay"),
                bandwidth=netif.getparam("bw"),
                dup=netif.getparam("duplicate"),
                jitter=netif.getparam("jitter")
            )

            all_links.append(link_data)

            if not uni:
                continue

            # asymmetric link: emit a second message carrying the upstream
            # parameters for the reverse direction
            netif.swapparams('_params_up')
            link_data = LinkData(
                message_type=0,
                node1_id=otherobj.objid,
                node2_id=self.objid,
                unidirectional=1,
                delay=netif.getparam("delay"),
                bandwidth=netif.getparam("bw"),
                dup=netif.getparam("duplicate"),
                jitter=netif.getparam("jitter")
            )
            netif.swapparams('_params_up')
            all_links.append(link_data)

        return all_links
class PyCoreNetIf(object):
    """
    Base class for network interfaces.
    """

    def __init__(self, node, name, mtu):
        """
        Creates a PyCoreNetIf instance.

        :param core.coreobj.PyCoreNode node: node for interface
        :param str name: interface name
        :param mtu: mtu value
        :raises ValueError: when mtu is not an integer
        """
        self.node = node
        self.name = name
        # NOTE: ``long`` exists only under Python 2; this module predates
        # the project's Python 3 port.
        if not isinstance(mtu, (int, long)):
            raise ValueError
        self.mtu = mtu
        self.net = None
        self._params = {}
        self.addrlist = []
        self.hwaddr = None
        # placeholder position hook
        self.poshook = lambda a, b, c, d: None
        # used with EMANE
        self.transport_type = None
        # interface index on the network
        self.netindex = None
        # index used to find flow data
        self.flow_id = None

    def startup(self):
        """
        Startup method for the interface.

        :return: nothing
        """
        pass

    def shutdown(self):
        """
        Shutdown method for the interface.

        :return: nothing
        """
        pass

    def attachnet(self, net):
        """
        Attach network.

        :param core.coreobj.PyCoreNet net: network to attach
        :return: nothing
        """
        if self.net:
            # detach from any previously attached network first
            self.detachnet()
            self.net = None
        net.attach(self)
        self.net = net

    def detachnet(self):
        """
        Detach from a network.

        :return: nothing
        """
        if self.net is not None:
            self.net.detach(self)

    def addaddr(self, addr):
        """
        Add address.

        :param str addr: address to add
        :return: nothing
        """
        self.addrlist.append(addr)

    def deladdr(self, addr):
        """
        Delete address.

        :param str addr: address to delete
        :return: nothing
        """
        self.addrlist.remove(addr)

    def sethwaddr(self, addr):
        """
        Set hardware address.

        :param core.misc.ipaddress.MacAddress addr: hardware address to set to.
        :return: nothing
        """
        self.hwaddr = addr

    def getparam(self, key):
        """
        Retrieve a parameter from the, or None if the parameter does not exist.

        :param key: parameter to get value for
        :return: parameter value
        """
        return self._params.get(key)

    def getparams(self):
        """
        Return (key, value) pairs for parameters.
        """
        parameters = []
        # sorted so the result is deterministic (used for param comparisons)
        for k in sorted(self._params.keys()):
            parameters.append((k, self._params[k]))
        return parameters

    def setparam(self, key, value):
        """
        Set a parameter value, returns True if the parameter has changed.

        :param key: parameter name to set
        :param value: parameter value
        :return: True if parameter changed, False otherwise
        """
        # treat None and 0 as unchanged values
        # NOTE(review): ``None <= 0`` relies on Python 2 mixed-type ordering;
        # under Python 3 this comparison raises TypeError.
        current_value = self._params.get(key)
        if current_value == value or current_value <= 0 and value <= 0:
            return False
        self._params[key] = value
        return True

    def swapparams(self, name):
        """
        Swap out parameters dict for name. If name does not exist,
        intialize it. This is for supporting separate upstream/downstream
        parameters when two layer-2 nodes are linked together.

        :param str name: name of parameter to swap
        :return: nothing
        """
        tmp = self._params
        if not hasattr(self, name):
            setattr(self, name, {})
        self._params = getattr(self, name)
        setattr(self, name, tmp)

    def setposition(self, x, y, z):
        """
        Dispatch position hook handler.

        :param x: x position
        :param y: y position
        :param z: z position
        :return: nothing
        """
        self.poshook(self, x, y, z)
| 27.852048
| 100
| 0.558338
| 2,345
| 21,084
| 4.952239
| 0.156077
| 0.019375
| 0.002325
| 0.006458
| 0.286231
| 0.247223
| 0.233015
| 0.198054
| 0.164213
| 0.15474
| 0
| 0.004872
| 0.357475
| 21,084
| 756
| 101
| 27.888889
| 0.852366
| 0.331057
| 0
| 0.262658
| 0
| 0
| 0.019024
| 0
| 0
| 0
| 0
| 0.003968
| 0
| 1
| 0.155063
| false
| 0.006329
| 0.028481
| 0
| 0.281646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
914969a6475944053d8a15e1118e2d12ecdc9855
| 349
|
py
|
Python
|
abc/128/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | 3
|
2019-06-25T06:17:38.000Z
|
2019-07-13T15:18:51.000Z
|
abc/128/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | null | null | null |
abc/128/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | null | null | null |
# Read input: N rows of "city score".
n = int(input())
rows = [input().split() for _ in range(n)]
# Order restaurant indices by city name ascending, then score descending.
order = sorted(range(n), key=lambda i: (rows[i][0], -int(rows[i][1])))
# Output the original 1-based indices, one per line.
print('\n'.join(str(i + 1) for i in order))
| 13.96
| 55
| 0.34384
| 49
| 349
| 2.387755
| 0.510204
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.449857
| 349
| 24
| 56
| 14.541667
| 0.588542
| 0.014327
| 0
| 0
| 0
| 0
| 0.005865
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
914b520c0a97da68019f1f6058aa11f3ec987d8a
| 1,915
|
py
|
Python
|
additional/hashcat_crack.py
|
mmmds/WirelessDiscoverCrackScan
|
2eda9bd7c474d91ea08511a7322f5ba14d034f3d
|
[
"MIT"
] | 2
|
2020-02-09T15:35:05.000Z
|
2020-04-15T10:01:24.000Z
|
additional/hashcat_crack.py
|
mmmds/WirelessDiscoverCrackScan
|
2eda9bd7c474d91ea08511a7322f5ba14d034f3d
|
[
"MIT"
] | null | null | null |
additional/hashcat_crack.py
|
mmmds/WirelessDiscoverCrackScan
|
2eda9bd7c474d91ea08511a7322f5ba14d034f3d
|
[
"MIT"
] | null | null | null |
# External cracking script, part of https://github.com/mmmds/WirelessDiscoverCrackScan
import datetime
import subprocess
import os
### CONFIGURATION
# Windows install location of hashcat and its executable name.
HASHCAT_DIR = "C:\\hashcat-5.1.0"
HASHCAT_EXE = "hashcat64.exe"
# Log of already-attempted date/dictionary/hash-file combinations.
LOG_FILE = "crack_log.txt"
# Directory scanned for wordlist dictionaries.
DICT_DIR = "./dicts"
def load_dict_list():
    """Return the list of dictionary file names directly inside DICT_DIR.

    Only the first level of the walk is inspected (top-level files).
    Returns an empty list instead of None when DICT_DIR does not exist,
    so callers can always iterate the result safely.
    """
    for _root, _dirs, files in os.walk(DICT_DIR):
        return files
    return []
def parse_log():
    """Parse the crack log into a {(hash_file, dict_file): date} mapping.

    Each log line has the form "date/dictionary/hash_file"; malformed
    lines (too few fields) are skipped. Narrowed from a bare ``except``,
    which also swallowed KeyboardInterrupt/SystemExit.
    """
    cracked = {}
    with open(LOG_FILE, "r") as f:
        for line in f:
            parts = line.split("/")
            try:
                date = parts[0]
                dict_file = parts[1].strip()
                hash_file = parts[2].split(".")[0].strip()
            except IndexError:
                # blank or malformed line: ignore, as before
                continue
            cracked[(hash_file, dict_file)] = date
    return cracked
def append_log(file, dictionary):
    """Append a 'date/dictionary/hash-file' record to the crack log."""
    entry = "{}/{}/{}".format(str(datetime.datetime.now()), dictionary, file)
    with open(LOG_FILE, "a") as log:
        log.write("\n" + entry)
def read_files():
    """Scan the current directory for capture files.

    Returns a (pmkid, handshake) tuple of base-name lists: *.16800 files
    go in the first list, *.2500 files in the second.
    """
    pmkid, handshake = [], []
    for name in os.listdir("."):
        base = name.split(".")[0]
        if name.endswith(".16800"):
            pmkid.append(base)
        elif name.endswith(".2500"):
            handshake.append(base)
    return (pmkid, handshake)
def process(files, t, logs, dicts):
    """Run hashcat for every (capture, dictionary) pair not already logged.

    ``t`` is the hashcat mode ("16800" for PMKID, "2500" for handshakes);
    already-attempted pairs found in ``logs`` are skipped.
    """
    for capture in files:
        for dictionary in dicts:
            if (capture.split(".")[0], dictionary) in logs:
                print("\n\n-----------{} {} in logs\n\n".format(capture, dictionary))
                continue
            print("\n\n######## {} {}\n\n".format(capture, dictionary))
            cwd = os.getcwd()
            subprocess.Popen(
                [HASHCAT_DIR + "\\" + HASHCAT_EXE, "-m", t,
                 "{}\\{}.{}".format(cwd, capture, t),
                 "{}\\{}\\{}".format(cwd, DICT_DIR, dictionary)],
                cwd=HASHCAT_DIR,
            ).wait()
            append_log(capture, dictionary)
# Entry point: discover captures, load prior attempts, then crack both
# PMKID (.16800) and 4-way-handshake (.2500) captures.
files = read_files()
logs = parse_log()
dicts = load_dict_list()
print(dicts)
print(files)
print(logs)
pmkid, hs4 = files
process(pmkid, "16800", logs, dicts)
process(hs4, "2500", logs, dicts)
| 27.357143
| 170
| 0.518538
| 254
| 1,915
| 3.807087
| 0.330709
| 0.010341
| 0.021717
| 0.031024
| 0.020683
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026529
| 0.291384
| 1,915
| 69
| 171
| 27.753623
| 0.686072
| 0.051175
| 0
| 0.035714
| 0
| 0
| 0.09106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0.017857
| 0.053571
| 0
| 0.196429
| 0.089286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
914cfd2421dd20bdadd6d7150cecf300e7699605
| 13,463
|
py
|
Python
|
lbrynet/file_manager/EncryptedFileManager.py
|
shyba/lbry
|
ab3278c50a8b7b5a8e9486a1c52be3d5e0c18297
|
[
"MIT"
] | 1
|
2018-12-08T04:42:11.000Z
|
2018-12-08T04:42:11.000Z
|
lbrynet/file_manager/EncryptedFileManager.py
|
mrlucky9/lbry
|
bf6bc02828ed55e98a3002f487041acbd7841883
|
[
"MIT"
] | null | null | null |
lbrynet/file_manager/EncryptedFileManager.py
|
mrlucky9/lbry
|
bf6bc02828ed55e98a3002f487041acbd7841883
|
[
"MIT"
] | null | null | null |
"""
Keep track of which LBRY Files are downloading and store their LBRY File specific metadata
"""
import logging
import os
from twisted.enterprise import adbapi
from twisted.internet import defer, task, reactor
from twisted.python.failure import Failure
from lbrynet.reflector.reupload import reflect_stream
from lbrynet.core.PaymentRateManager import NegotiatedPaymentRateManager
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError
from lbrynet.cryptstream.client.CryptStreamDownloader import CurrentlyStoppingError
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet import conf
log = logging.getLogger(__name__)
def safe_start_looping_call(looping_call, seconds=3600):
    """Start ``looping_call`` with the given interval unless it is already running."""
    if looping_call.running:
        return
    looping_call.start(seconds)
def safe_stop_looping_call(looping_call):
    """Stop ``looping_call`` if it is currently running; no-op otherwise."""
    if not looping_call.running:
        return
    looping_call.stop()
class EncryptedFileManager(object):
    """Keeps track of currently opened LBRY Files, their options, and
    their LBRY File specific metadata.
    """
    def __init__(self, session, stream_info_manager, sd_identifier, download_directory=None):
        """
        :param session: shared session providing the payment rate manager,
            blob manager, wallet, peer finder and rate limiter
        :param stream_info_manager: store of stream metadata
        :param sd_identifier: registry of stream downloader factories
        :param download_directory: where files are saved; defaults to the
            current working directory
        """
        self.session = session
        self.stream_info_manager = stream_info_manager
        # TODO: why is sd_identifier part of the file manager?
        self.sd_identifier = sd_identifier
        # currently opened ManagedEncryptedFileDownloader instances
        self.lbry_files = []
        # adbapi connection pool; created in setup(), closed in stop()
        self.sql_db = None
        if download_directory:
            self.download_directory = download_directory
        else:
            self.download_directory = os.getcwd()
        # periodically reflects (re-uploads) all tracked files
        self.lbry_file_reflector = task.LoopingCall(self.reflect_lbry_files)
        log.debug("Download directory for EncryptedFileManager: %s", str(self.download_directory))
    @defer.inlineCallbacks
    def setup(self):
        """Open the database, register the downloader factory, restart
        tracked files, and start the reflector loop when enabled."""
        yield self._open_db()
        yield self._add_to_sd_identifier()
        yield self._start_lbry_files()
        if conf.settings['reflect_uploads']:
            safe_start_looping_call(self.lbry_file_reflector)
    def get_lbry_file_status(self, lbry_file):
        """Return a deferred firing with the stored status of the file."""
        return self._get_lbry_file_status(lbry_file.rowid)
    def set_lbry_file_data_payment_rate(self, lbry_file, new_rate):
        """Persist a new blob data payment rate for the file."""
        return self._set_lbry_file_payment_rate(lbry_file.rowid, new_rate)
    def change_lbry_file_status(self, lbry_file, status):
        """Persist a new status for the file."""
        log.debug("Changing status of %s to %s", lbry_file.stream_hash, status)
        return self._change_file_status(lbry_file.rowid, status)
    def get_lbry_file_status_reports(self):
        """Collect status() reports from every tracked file, dropping any
        whose status deferred failed."""
        ds = []
        for lbry_file in self.lbry_files:
            ds.append(lbry_file.status())
        dl = defer.DeferredList(ds)
        def filter_failures(status_reports):
            # DeferredList fires with (success, result) pairs
            return [status_report for success, status_report in status_reports if success is True]
        dl.addCallback(filter_failures)
        return dl
    def save_sd_blob_hash_to_stream(self, stream_hash, sd_hash):
        """Delegate persistence of the sd blob hash to the info manager."""
        return self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, sd_hash)
    def _add_to_sd_identifier(self):
        # register this manager as the downloader factory source for
        # encrypted file streams
        downloader_factory = ManagedEncryptedFileDownloaderFactory(self)
        self.sd_identifier.add_stream_downloader_factory(
            EncryptedFileStreamType, downloader_factory)
    @defer.inlineCallbacks
    def _check_stream_is_managed(self, stream_hash):
        # check that all the streams in the stream_info_manager are also
        # tracked by lbry_file_manager and fix any streams that aren't.
        rowid = yield self._get_rowid_for_stream_hash(stream_hash)
        if rowid is not None:
            defer.returnValue(True)
        rate = self.session.base_payment_rate_manager.min_blob_data_payment_rate
        key, stream_name, file_name = yield self.stream_info_manager.get_stream_info(stream_hash)
        # NOTE(review): str.decode('hex') is Python 2 only — confirm this
        # codebase still targets Python 2 before porting
        log.warning("Trying to fix missing lbry file for %s", stream_name.decode('hex'))
        yield self._save_lbry_file(stream_hash, rate)
    @defer.inlineCallbacks
    def _check_stream_info_manager(self):
        # ensure every known stream has a corresponding lbry_file_options row
        def _iter_streams(stream_hashes):
            for stream_hash in stream_hashes:
                yield self._check_stream_is_managed(stream_hash)
        stream_hashes = yield self.stream_info_manager.get_all_streams()
        log.debug("Checking %s streams", len(stream_hashes))
        yield defer.DeferredList(list(_iter_streams(stream_hashes)))
    @defer.inlineCallbacks
    def _start_lbry_files(self):
        # restore and start every file recorded in the database
        yield self._check_stream_info_manager()
        files_and_options = yield self._get_all_lbry_files()
        yield defer.DeferredList([
            self._set_options_and_restore(rowid, stream_hash, options)
            for rowid, stream_hash, options in files_and_options
        ])
        log.info("Started %i lbry files", len(self.lbry_files))
    @defer.inlineCallbacks
    def _set_options_and_restore(self, rowid, stream_hash, options):
        # failures are logged rather than propagated so one bad file does
        # not prevent the rest from starting
        try:
            b_prm = self.session.base_payment_rate_manager
            payment_rate_manager = NegotiatedPaymentRateManager(
                b_prm, self.session.blob_tracker)
            downloader = yield self.start_lbry_file(
                rowid, stream_hash, payment_rate_manager, blob_data_rate=options)
            yield downloader.restore()
        except Exception:
            log.error('An error occurred while starting a lbry file (%s, %s, %s)',
                      rowid, stream_hash, options)
    @defer.inlineCallbacks
    def start_lbry_file(self, rowid, stream_hash,
                        payment_rate_manager, blob_data_rate=None,
                        download_directory=None, file_name=None):
        """Create, register and return a downloader for the stream.

        :return: deferred firing with the ManagedEncryptedFileDownloader
        """
        if not download_directory:
            download_directory = self.download_directory
        payment_rate_manager.min_blob_data_payment_rate = blob_data_rate
        lbry_file_downloader = ManagedEncryptedFileDownloader(
            rowid,
            stream_hash,
            self.session.peer_finder,
            self.session.rate_limiter,
            self.session.blob_manager,
            self.stream_info_manager,
            self,
            payment_rate_manager,
            self.session.wallet,
            download_directory,
            file_name=file_name
        )
        yield lbry_file_downloader.set_stream_info()
        self.lbry_files.append(lbry_file_downloader)
        defer.returnValue(lbry_file_downloader)
    @defer.inlineCallbacks
    def _stop_lbry_file(self, lbry_file):
        def wait_for_finished(lbry_file, count=2):
            # NOTE(review): this passes count= to self._stop_lbry_file,
            # which does not accept that keyword, and `if count` is always
            # true for the initial count=2 — this retry path looks broken
            # and would raise TypeError if exercised; confirm and fix.
            if count or lbry_file.saving_status is not False:
                return task.deferLater(reactor, 1, self._stop_lbry_file, lbry_file, count=count - 1)
        try:
            yield lbry_file.stop(change_status=False)
            self.lbry_files.remove(lbry_file)
        except CurrentlyStoppingError:
            yield wait_for_finished(lbry_file)
        except AlreadyStoppedError:
            pass
        finally:
            defer.returnValue(None)
    def _stop_lbry_files(self):
        # plain generator of deferreds; consumed via DeferredList in stop()
        log.info("Stopping %i lbry files", len(self.lbry_files))
        lbry_files = self.lbry_files
        for lbry_file in lbry_files:
            yield self._stop_lbry_file(lbry_file)
    @defer.inlineCallbacks
    def add_lbry_file(self, stream_hash, payment_rate_manager, blob_data_rate=None,
                      download_directory=None, file_name=None):
        """Persist a new file row, then start and return its downloader."""
        rowid = yield self._save_lbry_file(stream_hash, blob_data_rate)
        lbry_file = yield self.start_lbry_file(rowid, stream_hash, payment_rate_manager,
                                               blob_data_rate, download_directory,
                                               file_name)
        defer.returnValue(lbry_file)
    @defer.inlineCallbacks
    def delete_lbry_file(self, lbry_file, delete_file=False):
        """Stop a tracked file, remove its database rows and stream data,
        and optionally delete the downloaded file from disk.

        :raises ValueError: if the file is not tracked by this manager
        """
        if lbry_file not in self.lbry_files:
            raise ValueError("Could not find that LBRY file")
        def wait_for_finished(count=2):
            if count <= 0 or lbry_file.saving_status is False:
                return True
            else:
                return task.deferLater(reactor, 1, wait_for_finished, count=count - 1)
        full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
        try:
            yield lbry_file.stop()
        except (AlreadyStoppedError, CurrentlyStoppingError):
            yield wait_for_finished()
        self.lbry_files.remove(lbry_file)
        yield self._delete_lbry_file_options(lbry_file.rowid)
        yield lbry_file.delete_data()
        # TODO: delete this
        # get count for stream hash returns the count of the lbry files with the stream hash
        # in the lbry_file_options table, which will soon be removed.
        stream_count = yield self.get_count_for_stream_hash(lbry_file.stream_hash)
        if stream_count == 0:
            yield self.stream_info_manager.delete_stream(lbry_file.stream_hash)
        else:
            msg = ("Can't delete stream info for %s, count is %i\n"
                   "The call that resulted in this warning will\n"
                   "be removed in the database refactor")
            log.warning(msg, lbry_file.stream_hash, stream_count)
        if delete_file and os.path.isfile(full_path):
            os.remove(full_path)
        defer.returnValue(True)
    def toggle_lbry_file_running(self, lbry_file):
        """Toggle whether a stream reader is currently running"""
        for l in self.lbry_files:
            if l == lbry_file:
                return l.toggle_running()
        return defer.fail(Failure(ValueError("Could not find that LBRY file")))
    def _reflect_lbry_files(self):
        # generator of deferreds, one reflect per tracked file
        for lbry_file in self.lbry_files:
            yield reflect_stream(lbry_file)
    @defer.inlineCallbacks
    def reflect_lbry_files(self):
        """Reflect (re-upload) every tracked file; driven by the LoopingCall."""
        yield defer.DeferredList(list(self._reflect_lbry_files()))
    @defer.inlineCallbacks
    def stop(self):
        """Stop the reflector loop and all files, then close the database."""
        safe_stop_looping_call(self.lbry_file_reflector)
        yield defer.DeferredList(list(self._stop_lbry_files()))
        if self.sql_db:
            yield self.sql_db.close()
            self.sql_db = None
        log.info("Stopped %s", self)
        defer.returnValue(True)
    def get_count_for_stream_hash(self, stream_hash):
        """Return a deferred firing with the number of file rows for the hash."""
        return self._get_count_for_stream_hash(stream_hash)
    ######### database calls #########
    def _open_db(self):
        # check_same_thread=False is solely to quiet a spurious error that appears to be due
        # to a bug in twisted, where the connection is closed by a different thread than the
        # one that opened it. The individual connections in the pool are not used in multiple
        # threads.
        self.sql_db = adbapi.ConnectionPool(
            "sqlite3",
            os.path.join(self.session.db_dir, "lbryfile_info.db"),
            check_same_thread=False
        )
        return self.sql_db.runQuery(
            "create table if not exists lbry_file_options (" +
            " blob_data_rate real, " +
            " status text," +
            # NOTE(review): the missing '+' below relies on implicit string
            # literal concatenation; harmless but inconsistent
            " stream_hash text,"
            " foreign key(stream_hash) references lbry_files(stream_hash)" +
            ")"
        )
    @rerun_if_locked
    def _save_lbry_file(self, stream_hash, data_payment_rate):
        def do_save(db_transaction):
            # files start out stopped; the insert's rowid identifies the file
            row = (data_payment_rate, ManagedEncryptedFileDownloader.STATUS_STOPPED, stream_hash)
            db_transaction.execute("insert into lbry_file_options values (?, ?, ?)", row)
            return db_transaction.lastrowid
        return self.sql_db.runInteraction(do_save)
    @rerun_if_locked
    def _delete_lbry_file_options(self, rowid):
        return self.sql_db.runQuery("delete from lbry_file_options where rowid = ?",
                                    (rowid,))
    @rerun_if_locked
    def _set_lbry_file_payment_rate(self, rowid, new_rate):
        return self.sql_db.runQuery(
            "update lbry_file_options set blob_data_rate = ? where rowid = ?",
            (new_rate, rowid))
    @rerun_if_locked
    def _get_all_lbry_files(self):
        d = self.sql_db.runQuery("select rowid, stream_hash, blob_data_rate from lbry_file_options")
        return d
    @rerun_if_locked
    def _change_file_status(self, rowid, new_status):
        return self.sql_db.runQuery("update lbry_file_options set status = ? where rowid = ?",
                                    (new_status, rowid))
    @rerun_if_locked
    def _get_lbry_file_status(self, rowid):
        d = self.sql_db.runQuery("select status from lbry_file_options where rowid = ?",
                                 (rowid,))
        # single row expected; None when the rowid is unknown
        d.addCallback(lambda r: (r[0][0] if len(r) else None))
        return d
    @rerun_if_locked
    def _get_count_for_stream_hash(self, stream_hash):
        d = self.sql_db.runQuery("select count(*) from lbry_file_options where stream_hash = ?",
                                 (stream_hash,))
        d.addCallback(lambda r: (r[0][0] if r else 0))
        return d
    @rerun_if_locked
    def _get_rowid_for_stream_hash(self, stream_hash):
        d = self.sql_db.runQuery("select rowid from lbry_file_options where stream_hash = ?",
                                 (stream_hash,))
        d.addCallback(lambda r: (r[0][0] if len(r) else None))
        return d
| 40.18806
| 100
| 0.672733
| 1,695
| 13,463
| 5.021829
| 0.157522
| 0.078008
| 0.014803
| 0.015977
| 0.37453
| 0.239074
| 0.140625
| 0.104558
| 0.086466
| 0.080122
| 0
| 0.00199
| 0.253584
| 13,463
| 334
| 101
| 40.308383
| 0.845059
| 0.063507
| 0
| 0.18677
| 0
| 0
| 0.086035
| 0.003505
| 0
| 0
| 0
| 0.002994
| 0
| 1
| 0.14786
| false
| 0.003891
| 0.054475
| 0.031128
| 0.291829
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
914ea6fbc1fedc5c88691906b2f1c1f56a6d040c
| 5,907
|
py
|
Python
|
fhir/immunizations_demo/models/trainer/model.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | null | null | null |
fhir/immunizations_demo/models/trainer/model.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | 22
|
2019-12-16T22:18:37.000Z
|
2022-03-12T00:04:43.000Z
|
fhir/immunizations_demo/models/trainer/model.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A simple logistics regression model for immunization prediction.
The following features are used in this model:
1. age of the patient
2. gender of the patient
3. country the patient is visiting
4. expected duration of stay
5. disease
We are predicting the possibility of the patient getting a disease.
Note that this model is part of an end-to-end demo which shows how
to leverage the Google Cloud Healthcare APIs (FHIR APIs specifically)
to finish data analysis and machine learning tasks. This problem
itself is not a natural machine learning task.
"""
import tensorflow as tf
from functools import reduce
# Input data specific flags.
tf.flags.DEFINE_string("training_data", default=None,
                       help="Path to training data. This should be a GCS path.")
tf.flags.DEFINE_string("eval_data", default=None,
                       help="Path to evaluation data. This should be a GCS path.")
# Model specific flags. See more details here:
# https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier
tf.flags.DEFINE_string("model_dir", default=None,
                       help="Estimator model_dir.")
tf.flags.DEFINE_string("export_model_dir", default=None,
                       help="Folder to export trained model.")
tf.flags.DEFINE_integer("batch_size", default=96,
                        help="Mini-batch size for the training.")
tf.flags.DEFINE_integer("training_steps", default=1000,
                        help="Total number of training steps.")
tf.flags.DEFINE_integer("eval_steps", default=100,
                        help="Total number of evaluation steps.")
tf.flags.DEFINE_integer("n_classes", default=2,
                        help="Number of categories to classify to.")
# More advanced flags that controls the behavior of FTRL optimizer.
# See more details here:
# https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer
tf.flags.DEFINE_float("learning_rate", default=0.01,
                      help="Learning rate")
tf.flags.DEFINE_float("l1_regularization_strength", default=0.005,
                      help="L1 regularization strength for FTRL optimizer.")
tf.flags.DEFINE_float("l2_regularization_strength", default=0.001,
                      help="L2 regularization strength for FTRL optimizer.")
FLAGS = tf.flags.FLAGS
# Feature and label keys.
# Used both as the TFRecord parsing spec and as feature-column names below.
FEATURE_KEYS = ['age', 'gender', 'country', 'duration', 'disease']
LABEL_KEYS = ['risk']
# Read buffer size passed to TFRecordDataset (bytes, per the TF API docs).
DS_BUFFER_SIZE = 50000
def build_input_fn(filename):
    """Builds the input function for training/evaluation.

    Args:
        filename (string): The path of the file that contains features and
            labels. This can be a Google Cloud Storage path (e.g. gs://...).

    Returns:
        A zero-argument callable usable as an Estimator input_fn.
    """
    def input_fn():
        """Input function to be used by the classifier."""
        def parse(serialized_example):
            """Parses a single tensorflow example."""
            def parse_feature(features, key):
                # accumulate a {key: FixedLenFeature} parsing-spec dict
                features[key] = tf.FixedLenFeature([], tf.int64)
                return features
            data = tf.parse_single_example(serialized_example,
                                           features=reduce(parse_feature, FEATURE_KEYS + LABEL_KEYS, {}))
            features = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
                        for key in FEATURE_KEYS]
            labels = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
                      for key in LABEL_KEYS]
            return features, labels
        dataset = tf.data.TFRecordDataset(filename, buffer_size=DS_BUFFER_SIZE)
        # cache parsed examples, then repeat forever; the step-count flags
        # bound how much of the repeated stream is consumed
        dataset = dataset.map(parse).cache().repeat()
        dataset = dataset.batch(FLAGS.batch_size)
        features, labels = dataset.make_one_shot_iterator().get_next()
        # Slice features into a dictionary which is expected by the classifier.
        features = tf.transpose(features)
        def map_feature(dict, idx):
            """Maps individual features into a dictionary."""
            # NOTE(review): `dict` shadows the builtin; rename when touching this
            dict[FEATURE_KEYS[idx]] = tf.transpose(
                tf.nn.embedding_lookup(features, [idx]))
            return dict
        return reduce(map_feature, list(range(len(FEATURE_KEYS))), {}), labels
    return input_fn
def build_serving_input_receiver_fn():
    """Builds a serving_input_receiver_fn which takes JSON as input."""
    def serving_input_receiver_fn():
        # one int32 placeholder per feature; the same dict doubles as both
        # the model features and the receiver tensors
        def add_input(inputs, feature):
            inputs[feature] = tf.placeholder(shape=[None], dtype=tf.int32)
            return inputs
        inputs = reduce(add_input, FEATURE_KEYS, {})
        return tf.estimator.export.ServingInputReceiver(inputs, inputs)
    return serving_input_receiver_fn
def main(_):
    """Trains, evaluates, and optionally exports the linear classifier.

    Args:
        _: unused argument supplied by tf.app.run().
    """
    # All features have been converted to integer representation beforehand.
    feature_columns = [tf.feature_column.numeric_column(key=key, dtype=tf.int32)
                       for key in FEATURE_KEYS]
    classifier = tf.estimator.LinearClassifier(
        feature_columns=feature_columns,
        model_dir=FLAGS.model_dir,
        n_classes=FLAGS.n_classes,
        optimizer=tf.train.FtrlOptimizer(
            learning_rate=FLAGS.learning_rate,
            l1_regularization_strength=FLAGS.l1_regularization_strength,
            l2_regularization_strength=FLAGS.l2_regularization_strength),
        config=tf.estimator.RunConfig(keep_checkpoint_max=1))
    # Training.
    classifier.train(
        input_fn=build_input_fn(FLAGS.training_data),
        steps=FLAGS.training_steps)
    # Evaluation.
    classifier.evaluate(
        input_fn=build_input_fn(FLAGS.eval_data),
        steps=FLAGS.eval_steps)
    # Export SavedModel.
    if FLAGS.export_model_dir is not None:
        classifier.export_saved_model(
            FLAGS.export_model_dir,
            build_serving_input_receiver_fn())
if __name__ == '__main__':
    # Set logging level to INFO.
    tf.logging.set_verbosity(tf.logging.INFO)
    # tf.app.run parses the flags defined above and then calls main()
    tf.app.run()
| 34.54386
| 78
| 0.744033
| 838
| 5,907
| 5.088305
| 0.332936
| 0.0197
| 0.033537
| 0.025797
| 0.154081
| 0.089353
| 0.06637
| 0.046435
| 0.046435
| 0.046435
| 0
| 0.011838
| 0.156255
| 5,907
| 170
| 79
| 34.747059
| 0.8437
| 0.344507
| 0
| 0.023256
| 0
| 0
| 0.154311
| 0.01367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104651
| false
| 0
| 0.023256
| 0
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91504bbaea6d8835c5bee931052df81b48164c98
| 8,305
|
py
|
Python
|
src/ychaos/core/verification/controller.py
|
sushilkar/ychaos
|
6801390f0faf553789e3384440a72a0705310738
|
[
"Apache-2.0"
] | null | null | null |
src/ychaos/core/verification/controller.py
|
sushilkar/ychaos
|
6801390f0faf553789e3384440a72a0705310738
|
[
"Apache-2.0"
] | null | null | null |
src/ychaos/core/verification/controller.py
|
sushilkar/ychaos
|
6801390f0faf553789e3384440a72a0705310738
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import time
from typing import Dict, List, Optional, Type
from pydantic import validate_arguments
from ...app_logger import AppLogger
from ...testplan import SystemState
from ...testplan.schema import TestPlan
from ...testplan.verification import VerificationConfig, VerificationType
from ...utils.hooks import EventHook
from ...utils.yaml import Dumper
from .data import VerificationData, VerificationStateData
from .plugins.BaseVerificationPlugin import BaseVerificationPlugin
from .plugins.HTTPRequestVerificationPlugin import (
HTTPRequestVerificationPlugin,
)
from .plugins.PythonModuleVerificationPlugin import (
PythonModuleVerificationPlugin,
)
from .plugins.SDv4VerificationPlugin import SDv4VerificationPlugin
# Enum value to corresponding Plugin Map.
# Keys are the string values of the verification `type` field; a type that
# is valid in the schema but absent here is treated as not yet implemented.
VERIFICATION_PLUGIN_MAP: Dict[str, Type[BaseVerificationPlugin]] = {
    "python_module": PythonModuleVerificationPlugin,
    "http_request": HTTPRequestVerificationPlugin,
    "sdv4": SDv4VerificationPlugin,
}
class VerificationController(EventHook):
    """
    Verification controller is used to run all the verification plugins configured in the testplan
    and assert that the system is expected to be in a state expected by the user. Extends the EventHook class,
    that defines the following event hooks.

    ## Valid Hooks

    === "on_start"
        Hook that gets called when the verification execution is about to start.
        No arguments are passed to the callable.
        ```python
        def callable_hook(): ...
        ```

    === "on_each_plugin_start"
        Hook that gets called when a particular plugin execution is about to start. `index` in the signature refers
        to the position in the list
        ```python
        def callable_hook(index: int, config: VerificationConfig): ...
        ```
        References:
            1. [VerificationConfig][ychaos.testplan.verification.VerificationConfig]

    === "on_each_plugin_end"
        Hook that gets called when a particular plugin execution has ended. `index` in the signature refers to the
        position in the list
        ```python
        def callable_hook(index: int, config: VerificationConfig, state_data: VerificationStateData): ...
        ```
        References:
            1. [VerificationConfig][ychaos.testplan.verification.VerificationConfig]
            2. [VerificationStateData][ychaos.core.verification.data.VerificationStateData]

    === "on_end"
        Hook that gets called when the verification execution has ended. Each element in the list
        of boolean corresponds to the result of the plugin, where `True` indicates successful verification
        and `False` is a failure to verify the state
        ```python
        def callable_hook(verify_list: List[bool]): ...
        ```

    === "on_plugin_not_found"
        Hook that gets called when a plugin available in schema is not ready for usage/not implemented.
        This case is possible for the plugins that are in Beta/development phase
        ```python
        def callable_hook(index:int, plugin_type: VerificationType): ...
        ```

    ---
    Each of the hooks get called on a certain event. The caller can register as many hooks for a particular event,
    by calling the `register_hook(event_name, hook_method)` method. All the hooks are executed sequentially. The best example
    of this is to register a hook to print information on CLI.
    """
    # Event name -> expected hook signature, consumed by the EventHook base
    __hook_events__ = {
        "on_start": EventHook.CallableType(),
        "on_each_plugin_start": EventHook.CallableType(int, VerificationConfig),
        "on_each_plugin_end": EventHook.CallableType(
            int, VerificationConfig, VerificationStateData
        ),
        "on_plugin_not_found": EventHook.CallableType(int, VerificationType),
        "on_end": EventHook.CallableType(List[bool]),
    }
    @validate_arguments
    def __init__(
        self,
        testplan: TestPlan,
        current_state: SystemState,
        verification_data: List[Dict[SystemState, Optional[VerificationStateData]]],
    ):
        """
        Initialize a verification controller object.

        Args:
            testplan: A valid testplan object
            current_state: The state in which the system is expected to be in
            verification_data (List[VerificationData]): The verification data probably from previous run.

        Raises:
            ValueError: if verification_data is non-empty but its length
                does not match the number of configured verifications
        """
        super(VerificationController, self).__init__()
        self.logger = AppLogger.get_logger(self.__class__.__name__)
        self.logger.bind(event="controller")
        self.testplan = testplan
        self.current_state = current_state
        if not verification_data:
            # No prior data: one empty mapping per configured verification.
            # NOTE(review): list-multiplication repeats the SAME dict object;
            # harmless only if parse_obj does not mutate its input — confirm.
            verification_data = [
                dict(),
            ] * len(self.testplan.verification)
        elif len(verification_data) != len(self.testplan.verification):
            raise ValueError("Data and verification config size mismatch")
        self.verification_data = list()
        for data in verification_data:
            self.verification_data.append(VerificationData.parse_obj(data))
    def execute(self) -> bool:
        """
        Execute the Verification controller.

        Returns:
            True if all the verification plugin pass, False otherwise
        """
        # Call all the hooks that were registered for `verification_start`
        # If there were no hooks registered, this will be no-op
        self.execute_hooks("on_start")
        _verify_list = list()
        for index, (verification_plugin, data) in enumerate(
            zip(self.testplan.verification, self.verification_data)
        ):
            # Delay before verifying
            time.sleep(verification_plugin.delay_before)
            assert isinstance(verification_plugin.states, List)  # For mypy
            if self.current_state in verification_plugin.states:
                self.logger.info(
                    msg=f"Starting {verification_plugin.type.value} verification"
                )
                plugin_class = VERIFICATION_PLUGIN_MAP.get(
                    verification_plugin.type.value, None
                )
                if plugin_class is None:
                    # This can happen when a new plugin is not implemented yet, but is
                    # available in the schema
                    self.execute_hooks(
                        "on_plugin_not_found", index, verification_plugin.type
                    )
                    continue
                plugin = plugin_class(verification_plugin.config, data)
                # Call all the hooks that were registered for `verification_plugin_start`.
                self.execute_hooks("on_each_plugin_start", index, verification_plugin)
                state_data = plugin.run_verification()
                self.logger.info(
                    msg=f"Completed {verification_plugin.type.value} verification"
                )
                # Call all the hooks that were registered for `verification_plugin_end`.
                self.execute_hooks(
                    "on_each_plugin_end", index, verification_plugin, state_data
                )
                data.replace_data(self.current_state, state_data)
                if verification_plugin.strict:
                    # only strict plugins contribute to the overall verdict
                    _verify_list.append(state_data.rc == 0)
            else:
                # plugin not applicable in the current state; record no data
                data.add_data(self.current_state, None)
            # Delay after verifying
            time.sleep(verification_plugin.delay_after)
        # Call all the hooks that were registered for `verification_end`.
        self.execute_hooks("on_end", _verify_list)
        return all(_verify_list)
    def get_encoded_verification_data(self):
        """Return the per-plugin verification data in encoded (JSON-safe) form."""
        return [data.encoded_dict() for data in self.verification_data]
    def dump_verification_json(self, fp):
        """Serialize the encoded verification data to ``fp`` as JSON."""
        import json
        json.dump(self.get_encoded_verification_data(), fp=fp, indent=4)
    def dump_verification_yaml(self, fp):
        """Serialize the encoded verification data to ``fp`` as YAML."""
        import yaml
        yaml.dump(
            self.get_encoded_verification_data(),
            fp,
            default_flow_style=False,
            sort_keys=False,
            Dumper=Dumper,
            indent=4,
        )
| 37.40991
| 125
| 0.656352
| 906
| 8,305
| 5.846578
| 0.249448
| 0.064565
| 0.013593
| 0.016991
| 0.253162
| 0.201246
| 0.162356
| 0.111761
| 0.094393
| 0.058146
| 0
| 0.002651
| 0.273329
| 8,305
| 221
| 126
| 37.579186
| 0.875062
| 0.389043
| 0
| 0.056604
| 0
| 0
| 0.069456
| 0.013389
| 0
| 0
| 0
| 0
| 0.009434
| 1
| 0.04717
| false
| 0
| 0.150943
| 0.009434
| 0.235849
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
915138c1e205dea19655e55c824d89b847b800d5
| 6,160
|
py
|
Python
|
labgraph/graphs/node_test_harness.py
|
Yunusbcr/labgraph
|
a00ae7098b7b0e0eda8ce2e7e62dae86854616fb
|
[
"MIT"
] | 124
|
2021-07-14T21:25:59.000Z
|
2022-03-08T20:40:16.000Z
|
labgraph/graphs/node_test_harness.py
|
Yunusbcr/labgraph
|
a00ae7098b7b0e0eda8ce2e7e62dae86854616fb
|
[
"MIT"
] | 46
|
2021-07-16T18:41:11.000Z
|
2022-03-31T20:53:00.000Z
|
labgraph/graphs/node_test_harness.py
|
Yunusbcr/labgraph
|
a00ae7098b7b0e0eda8ce2e7e62dae86854616fb
|
[
"MIT"
] | 22
|
2021-07-16T18:34:56.000Z
|
2022-03-31T15:12:06.000Z
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import asyncio
import functools
import inspect
from contextlib import contextmanager
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Generic,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from ..messages.message import Message
from ..util.testing import get_event_loop
from .config import Config
from .method import AsyncPublisher
from .node import Node
from .state import State
from .topic import Topic
N = TypeVar("N", bound=Node) # Node type
T = TypeVar("T", bound=Tuple[Topic, Message]) # Type yielded by async functions
class NodeTestHarness(Generic[N]):
    """
    Utility class for testing Labgraph nodes. This allows a user to test some behavior
    of a node in an asyncio event loop, with the harness taking care of setting up and
    cleaning up the node.

    Args:
        node_type: The type of node this harness will test.
    """

    def __init__(self, node_type: Type[N]) -> None:
        self.node_type: Type[N] = node_type

    @contextmanager
    def get_node(
        self, config: Optional[Config] = None, state: Optional[State] = None
    ) -> Iterator[N]:
        """
        Context manager to create, configure and yield a node of specified type.
        Node is cleaned up when the context manager exits.

        Args:
            config: The configuration to set on the node, if provided.
            state: The state to set on the Node, if provided.
        """
        # `node` stays None until construction succeeds, so cleanup() in the
        # finally-block only runs on a node that was actually created.
        node: Optional[N] = None
        try:
            node = self.node_type(config=config, state=state)
            node.setup()
            yield node
        finally:
            if node is not None:
                node.cleanup()
@overload
def run_with_harness(
    node_type: Type[N],
    fn: Callable[[N], AsyncIterable[T]],
    config: Optional[Config],
    state: Optional[State],
    max_num_results: Optional[int] = None,
) -> List[T]:
    ...


@overload
def run_with_harness(
    node_type: Type[N],
    fn: Callable[[N], Awaitable[T]],
    config: Optional[Config],
    state: Optional[State],
) -> T:
    ...


def run_with_harness(node_type, fn, config=None, state=None, max_num_results=None):
    """
    Create a node of ``node_type`` via `NodeTestHarness` and run ``fn`` on it.

    Args:
        node_type: The type of node to create.
        fn:
            The async function (or async generator function) to run; it receives
            the freshly set-up node as its only argument.
        config: The configuration to set on the node, if provided.
        state: The state to set on the node, if provided.
        max_num_results:
            If `fn` is an async generator, the maximum number of results it will
            generate. If this is `None`, then the generator can produce an
            unbounded number of results.
    """
    # Reject max_num_results early when fn is not an async generator.
    _check_max_num_results_arg(run_with_harness.__name__, fn, max_num_results)
    harness = NodeTestHarness(node_type=node_type)
    with harness.get_node(config=config, state=state) as node:
        return run_async(fn, args=[node], max_num_results=max_num_results)
@overload
def run_async(
    fn: Callable[..., Awaitable[T]],
    args: Optional[Sequence[Any]] = None,
    kwargs: Optional[Mapping[str, Any]] = None,
) -> T:
    ...


@overload
def run_async(
    fn: Callable[..., AsyncIterable[T]],
    args: Optional[Sequence[Any]] = None,
    kwargs: Optional[Mapping[str, Any]] = None,
    max_num_results: Optional[int] = None,
) -> List[T]:
    ...


def run_async(fn, args=None, kwargs=None, max_num_results=None):
    """
    Run an async function to completion on the current thread's event loop,
    blocking until it has finished. All arguments after ``fn`` are forwarded
    to the async function.

    Args:
        fn: The async function (or async generator function) to run.
        args: Positional arguments to forward to the function.
        kwargs: Keyword arguments to forward to the function.
        max_num_results:
            If `fn` is an async generator, the maximum number of results it will
            generate. If this is `None`, then the generator can produce an
            unbounded number of results.
    """
    # Reject max_num_results early when fn is not an async generator.
    _check_max_num_results_arg(run_async.__name__, fn, max_num_results)
    # functools.partial hides the real callable; inspect the wrapped function
    # so the async/generator checks see through the partial.
    target = fn.func if isinstance(fn, functools.partial) else fn
    if inspect.isasyncgenfunction(target):
        return get_event_loop().run_until_complete(
            _async_generator_to_list(
                fn=fn,
                args=args or [],
                kwargs=kwargs or {},
                max_num_results=max_num_results,
            )
        )
    if asyncio.iscoroutinefunction(target):
        return get_event_loop().run_until_complete(fn(*(args or []), **(kwargs or {})))
    raise TypeError(f"{run_async.__name__}: function '{fn}' is not async")
def _check_max_num_results_arg(
    called_fn_name: str,
    fn: Union[Callable[..., Awaitable[Any]], Callable[..., AsyncIterable[Any]]],
    max_num_results: Optional[int] = None,
) -> None:
    """
    Validate that ``max_num_results`` is only supplied for async generators.

    Raises:
        TypeError: If ``max_num_results`` is not None but ``fn`` is not an
            async generator function.
    """
    # Nothing to validate when the argument was not provided.
    if max_num_results is None:
        return
    if inspect.isasyncgenfunction(fn):
        return
    raise TypeError(
        f"{called_fn_name}: function '{fn}' is not an async generator but "
        "max_num_results was provided"
    )
async def _async_generator_to_list(
    fn: "Callable[..., AsyncIterable[T]]",
    args: Sequence[Any],
    kwargs: Mapping[str, Any],
    max_num_results: Optional[int] = None,
) -> "List[T]":
    """
    Drive async generator ``fn`` and collect its items into a list.

    Args:
        fn: The async generator function to run.
        args: Positional arguments forwarded to ``fn``.
        kwargs: Keyword arguments forwarded to ``fn``.
        max_num_results: If not None, stop after this many items. A value of
            ``0`` returns an empty list without pulling anything from the
            generator.

    Raises:
        ValueError: If ``max_num_results`` is negative.
    """
    if max_num_results is not None and max_num_results < 0:
        raise ValueError("max_num_results must be non-negative")
    # Bug fix: previously a limit of 0 still consumed and returned the first
    # item, because the bound was only checked after appending.
    if max_num_results == 0:
        return []
    result: "List[T]" = []
    async for retval in fn(*args, **kwargs):
        result.append(retval)
        if max_num_results is not None and len(result) >= max_num_results:
            break
    return result
| 30.49505
| 88
| 0.65276
| 827
| 6,160
| 4.70133
| 0.218863
| 0.040123
| 0.086934
| 0.013374
| 0.456276
| 0.401235
| 0.333076
| 0.291409
| 0.252058
| 0.210905
| 0
| 0.001308
| 0.255195
| 6,160
| 201
| 89
| 30.646766
| 0.84612
| 0.316396
| 0
| 0.296
| 0
| 0
| 0.044955
| 0.005245
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072
| false
| 0
| 0.096
| 0
| 0.216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9151eafe84027e81a61010f1c158d9786b978a93
| 837
|
py
|
Python
|
pygamelearning/lrud.py
|
edward70/2021Computing
|
df8fb818480a6e23f2eac736744294871ec0e38c
|
[
"MIT"
] | null | null | null |
pygamelearning/lrud.py
|
edward70/2021Computing
|
df8fb818480a6e23f2eac736744294871ec0e38c
|
[
"MIT"
] | null | null | null |
pygamelearning/lrud.py
|
edward70/2021Computing
|
df8fb818480a6e23f2eac736744294871ec0e38c
|
[
"MIT"
] | null | null | null |
import pygame
import sys

pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode([500, 500])

# A circle bounces horizontally along y=100 while a square bounces
# vertically along x=100; each reverses direction at the window edges.
gameOn = True
x1 = 0      # circle x (animated)
y1 = 100    # circle y (fixed)
x2 = 100    # square x (fixed)
y2 = 0      # square y (animated)

while gameOn == True:
    screen.fill([255,255,255])
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    # Flip direction at the edges (x1 and y2 start at 0, so both
    # direction flags are assigned on the first iteration).
    if x1 == 500:
        heading_right = False
    elif x1 == 0:
        heading_right = True
    if y2 == 500:
        heading_down = False
    elif y2 == 0:
        heading_down = True
    x1 += 1 if heading_right else -1
    y2 += 1 if heading_down else -1
    pygame.draw.circle(screen, [0,0,0], [x1,y1], 10)
    pygame.draw.rect(screen, [0,0,0], [x2,y2,30,30])
    clock.tick(100)
    pygame.display.flip()

pygame.quit()
| 17.808511
| 52
| 0.520908
| 117
| 837
| 3.717949
| 0.367521
| 0.018391
| 0.022989
| 0.041379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 0.342891
| 837
| 46
| 53
| 18.195652
| 0.663636
| 0
| 0
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054054
| 0
| 0.054054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91522a760e718a02b548df8a5987a17cb9ed54b7
| 3,198
|
py
|
Python
|
pytorch/xor/training_a_perceptron.py
|
e93fem/PyTorchNLPBook
|
c9ea9e0b3d1b8bba6a983b425c6c03dd79d3d6b0
|
[
"Apache-2.0"
] | null | null | null |
pytorch/xor/training_a_perceptron.py
|
e93fem/PyTorchNLPBook
|
c9ea9e0b3d1b8bba6a983b425c6c03dd79d3d6b0
|
[
"Apache-2.0"
] | null | null | null |
pytorch/xor/training_a_perceptron.py
|
e93fem/PyTorchNLPBook
|
c9ea9e0b3d1b8bba6a983b425c6c03dd79d3d6b0
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch import optim, nn
from pytorch.xor.multilayer_perceptron import MultilayerPerceptron
from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations

# Model dimensions: with num_hidden_layers == 0 this is a plain perceptron.
input_size = 2
output_size = len(set(LABELS))
num_hidden_layers = 0
hidden_size = 2 # isn't ever used but we still set it

# Seed all RNGs (torch CPU, torch CUDA, numpy) for reproducibility.
seed = 24
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)

mlp1 = MultilayerPerceptron(input_size=input_size,
                            hidden_size=hidden_size,
                            num_hidden_layers=num_hidden_layers,
                            output_size=output_size)
print(mlp1)

# Visualize the untrained model's decision boundary on a static sample.
batch_size = 1000
x_data_static, y_truth_static = get_toy_data(batch_size)
fig, ax = plt.subplots(1, 1, figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static,
                  ax=ax, title='Initial Perceptron State', levels=[0.5])
plt.axis('off')
plt.savefig('images/perceptron_initial.png')
plt.show()

# Training hyperparameters and bookkeeping state.
losses = []
batch_size = 10000
n_batches = 10
max_epochs = 10
loss_change = 1.0
last_loss = 10.0
change_threshold = 1e-3
epoch = 0
all_imagefiles = []
lr = 0.01
optimizer = optim.Adam(params=mlp1.parameters(), lr=lr)
cross_ent_loss = nn.CrossEntropyLoss()
def early_termination(loss_change, change_threshold, epoch, max_epochs):
    """Decide whether training should stop.

    Only the epoch cap is applied; the loss-convergence criterion is computed
    but intentionally not used (it was disabled in the original experiment).
    """
    loss_converged = loss_change < change_threshold  # currently unused
    epoch_cap_reached = epoch > max_epochs
    return epoch_cap_reached
# Train until the epoch cap is reached, visualizing the decision boundary
# once per epoch.
while not early_termination(loss_change, change_threshold, epoch, max_epochs):
    for _ in range(n_batches):
        # step 0: fetch the data
        x_data, y_target = get_toy_data(batch_size)
        # step 1: zero the gradients
        mlp1.zero_grad()
        # step 2: run the forward pass
        y_pred = mlp1(x_data).squeeze()
        # step 3: compute the loss
        loss = cross_ent_loss(y_pred, y_target.long())
        # step 4: compute the backward pass
        loss.backward()
        # step 5: have the optimizer take an optimization step
        optimizer.step()
        # auxillary: bookkeeping
        loss_value = loss.item()
        losses.append(loss_value)
        loss_change = abs(last_loss - loss_value)
        last_loss = loss_value
    # Per-epoch reporting and decision-boundary snapshot.
    print("epoch: {}: loss_value: {}".format(epoch, loss_value))
    fig, ax = plt.subplots(1, 1, figsize=(10, 5))
    visualize_results(mlp1, x_data_static, y_truth_static, ax=ax, epoch=epoch,
                      title=f"{loss_value:0.2f}; {loss_change:0.4f}")
    plt.axis('off')
    epoch += 1
    all_imagefiles.append(f'images/perceptron_epoch{epoch}_toylearning.png')
    plt.savefig(all_imagefiles[-1])

# Final decision boundary and intermediate-representation plots.
_, ax = plt.subplots(1,1,figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static, epoch=None, levels=[0.5], ax=ax)
plt.axis('off');
plt.savefig('images/perceptron_final.png')
plot_intermediate_representations(mlp1,
                                  "The Perceptron's Input and Intermediate Representation",
                                  figsize=(9, 3))
plt.savefig("images/perceptron_intermediate.png")
plt.savefig("images/figure_4_5.pdf")
| 30.169811
| 104
| 0.688555
| 451
| 3,198
| 4.629712
| 0.32816
| 0.038314
| 0.021073
| 0.022989
| 0.220785
| 0.202586
| 0.191571
| 0.157088
| 0.157088
| 0.104406
| 0
| 0.028899
| 0.210131
| 3,198
| 106
| 105
| 30.169811
| 0.797704
| 0.089118
| 0
| 0.055556
| 0
| 0
| 0.105372
| 0.054063
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013889
| false
| 0
| 0.083333
| 0
| 0.111111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91539993c3d566be3d6ad8bdfd6ab2f85574f003
| 8,157
|
py
|
Python
|
mysite/api/v0/tests.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 3
|
2015-11-20T07:33:28.000Z
|
2017-01-15T23:33:50.000Z
|
mysite/api/v0/tests.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 28
|
2015-07-14T11:33:24.000Z
|
2017-11-17T15:21:22.000Z
|
mysite/api/v0/tests.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 4
|
2015-04-29T09:04:59.000Z
|
2017-07-19T14:11:16.000Z
|
import json
import mock
from django.core.urlresolvers import reverse
from pymongo.errors import ServerSelectionTimeoutError
from analytics.models import CourseReport
from core.common.mongo import c_onboarding_status, _conn
from core.common import onboarding
from ct.models import UnitLesson, StudentError
from ctms.tests import MyTestCase
HEALTH_URL = reverse('api:v0:health-check')
def test_health_positive(client, db):
    """Health endpoint responds 200 and reports 'ok'."""
    response = client.get(HEALTH_URL)
    assert response.status_code == 200
    assert 'ok' in json.loads(response.content)
def test_health_non_ok(client, db, mocker):
    """
    Ping and Stats Mongo command return non ok results.
    """
    mocked_health = mocker.patch('api.v0.views.do_health')
    mocked_health.return_value = {}, {}
    assert client.get(HEALTH_URL).status_code == 503
def test_health_exception(client, db, mocker):
    """
    Mongo query raises exception.
    """
    mocker.patch(
        'api.v0.views.do_health',
        side_effect=ServerSelectionTimeoutError(),
    )
    assert client.get(HEALTH_URL).status_code == 503
class TestOnboardingStatus(MyTestCase):
    """Tests for the onboarding-status API endpoint (Mongo-backed)."""

    namespace = 'api:v0:onboarding-status'

    def setUp(self):
        super(TestOnboardingStatus, self).setUp()
        # Baseline onboarding document: all steps not yet completed.
        self.data = {
            onboarding.USER_ID: self.user.id,
            onboarding.STEP_1: False,
            onboarding.STEP_2: False,
            onboarding.STEP_3: False,
            onboarding.STEP_4: False,
        }

    def _seed_status(self):
        """Reset the collection to ``self.data`` and verify the write landed."""
        c_onboarding_status().remove()
        c_onboarding_status().insert(self.data.copy())
        saved = c_onboarding_status().find_one(
            {onboarding.USER_ID: self.user.id}, {'_id': False}
        )
        self.assertEqual(saved, self.data)

    def _put_status(self, payload):
        """PUT ``payload`` as JSON to the onboarding-status endpoint."""
        return self.client.put(
            reverse('api:v0:onboarding-status'),
            data=json.dumps(payload),
            content_type="application/json"
        )

    def test_put_valid_data(self):
        data_to_update = {onboarding.STEP_2: True}
        self._seed_status()
        self.assertEqual(
            self.client.login(username=self.username, password=self.password), True
        )
        response = self._put_status(data_to_update)
        self.assertEqual(response.status_code, 200)
        # The stored document must now reflect the updated step.
        expected = self.data.copy()
        expected.update(data_to_update)
        mongo_data = c_onboarding_status().find_one(
            {onboarding.USER_ID: self.user.id}, {'_id': False}
        )
        self.assertEqual(mongo_data, expected)

    def test_put_invalid_keys(self):
        # Unknown keys must be rejected with a 400.
        self._seed_status()
        response = self._put_status({'invalid_key': True})
        self.assertEqual(response.status_code, 400)

    def test_wo_user_403(self):
        c_onboarding_status().remove()
        self.client.logout()
        response = self.client.get(reverse(self.namespace))
        self.assertEqual(response.status_code, 403)

    def test_get_with_user_200(self):
        self._seed_status()
        response = self.client.get(reverse(self.namespace))
        expected_data = {
            "done": True,
        }
        response_data = json.loads(response.content)['data']
        for key in response_data.keys():
            self.assertSetEqual(set(expected_data), set(response_data[key]))
class ApiAccessMixinTest(object):
    """Shared permission checks for course-scoped API endpoints."""

    def _get_course_endpoint(self, course_id):
        # Helper: GET the endpoint under test for the given course id.
        return self.client.get(
            reverse(self.namespace, kwargs={'course_id': course_id})
        )

    def test_permissions_instructor_allowed(self):
        self.assertEqual(self._get_course_endpoint(self.course.id).status_code, 200)

    def test_permissions_not_instructor_disallowed(self):
        self.client.login(username=self.username2, password=self.password2)
        self.assertEqual(self._get_course_endpoint(self.course.id).status_code, 403)

    def test_permissions_user_not_authenticated(self):
        self.client.logout()
        self.assertEqual(self._get_course_endpoint(self.course.id).status_code, 403)

    def test_course_doesnt_exist(self):
        self.assertEqual(self._get_course_endpoint(100).status_code, 404)
class TestResponseViewSet(ApiAccessMixinTest, MyTestCase):
    """Tests for the course responses endpoint."""

    namespace = 'api:v0:responses'

    def test_serializer_author_name(self):
        url = reverse(self.namespace, kwargs={'course_id': self.course.id})
        payload = json.loads(self.client.get(url).content)
        expected = self.user.get_full_name() or self.user.username
        self.assertEqual(payload[0].get('author_name'), expected)
class TestErrorViewSet(ApiAccessMixinTest, MyTestCase):
    """Tests for the course errors endpoint (inherits shared permission checks)."""

    namespace = 'api:v0:errors'

    def setUp(self):
        super(TestErrorViewSet, self).setUp()
        # Create an error UnitLesson plus a StudentError pointing at it so the
        # endpoint has at least one error record to serialize.
        self.unit_lesson_error = UnitLesson(
            unit=self.unit, order=0,
            lesson=self.lesson, addedBy=self.user,
            treeID=self.lesson.id
        )
        self.unit_lesson_error.save()
        self.student_error = StudentError(
            response=self.resp1,
            errorModel=self.unit_lesson_error,
            author=self.user
        )
        self.student_error.save()

    def test_serializer_em_data(self):
        # The serialized error-model payload must expose exactly these fields.
        response = self.client.get(reverse(self.namespace, kwargs={'course_id': self.course.id}))
        fields_set = set([
            'id', 'lesson_concept_id', 'lesson_concept_isAbort', 'lesson_concept_isFail', 'lesson_text', 'treeID'
        ])
        em_data_set = set(json.loads(response.content)[0]['em_data'])
        self.assertSetEqual(fields_set, em_data_set)
class TestGenReportView(MyTestCase):
    """Tests for the report-generation endpoint: parameters and permissions."""

    namespace = 'api:v0:gen-report'

    def _request_report(self, **params):
        # Helper: GET the gen-report endpoint with optional query params.
        return self.client.get(reverse(self.namespace), data=params or None)

    def test_missed_course_id(self):
        self.assertEqual(self._request_report().status_code, 400)

    def test_course_doesnt_exist(self):
        self.assertEqual(self._request_report(course_id=100).status_code, 404)

    def test_not_allowed(self):
        self.client.login(username=self.username2, password=self.password2)
        self.assertEqual(
            self._request_report(course_id=self.course.id).status_code, 403
        )

    @mock.patch('api.v0.views.report.delay')
    def test_report_generated(self, report):
        response = self._request_report(course_id=self.course.id)
        self.assertEqual(response.status_code, 200)
        report.assert_called_with(str(self.course.id), self.user.id)
class TestCourseReportViewSet(ApiAccessMixinTest, MyTestCase):
    """Tests for the course reports endpoint."""

    namespace = 'api:v0:reports'

    def test_serializer_data(self):
        # One persisted report is enough to check the serialized field set.
        CourseReport(course=self.course).save()
        url = reverse(self.namespace, kwargs={'course_id': self.course.id})
        serialized = json.loads(self.client.get(url).content)[0]
        self.assertSetEqual({'date', 'response_report'}, set(serialized))
class TestEchoDataView(MyTestCase):
    """Tests for the echo-data endpoint: POST-only, works without auth."""

    namespace = 'api:v0:echo-data'

    def test_echo_405(self):
        # GET is not an allowed method.
        self.assertEqual(self.client.get(reverse(self.namespace)).status_code, 405)

    def test_echo_200(self):
        # POST succeeds both authenticated and anonymously.
        url = reverse(self.namespace)
        self.assertEqual(self.client.post(url).status_code, 200)
        self.client.logout()
        self.assertEqual(self.client.post(url).status_code, 200)
| 32.369048
| 113
| 0.674635
| 991
| 8,157
| 5.350151
| 0.177598
| 0.045266
| 0.061109
| 0.055451
| 0.541305
| 0.501886
| 0.490192
| 0.482459
| 0.45794
| 0.423991
| 0
| 0.014697
| 0.207552
| 8,157
| 251
| 114
| 32.498008
| 0.805538
| 0.032365
| 0
| 0.371951
| 0
| 0
| 0.064138
| 0.023416
| 0
| 0
| 0
| 0
| 0.164634
| 1
| 0.134146
| false
| 0.018293
| 0.054878
| 0
| 0.268293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9153c783ea6530b33a82747aab7d0a7d6aae69be
| 8,934
|
py
|
Python
|
signbank/settings/base.py
|
anthonymark33/Global-signbank
|
ae61984a24f1cc0801d4621c81b882154ce99098
|
[
"BSD-3-Clause"
] | null | null | null |
signbank/settings/base.py
|
anthonymark33/Global-signbank
|
ae61984a24f1cc0801d4621c81b882154ce99098
|
[
"BSD-3-Clause"
] | 2
|
2021-06-10T23:11:53.000Z
|
2021-12-13T20:44:56.000Z
|
signbank/settings/base.py
|
anthonymark33/Global-signbank
|
ae61984a24f1cc0801d4621c81b882154ce99098
|
[
"BSD-3-Clause"
] | null | null | null |
# Django settings for signbank project.
import os
from signbank.settings.server_specific import *
from datetime import datetime

# NOTE(review): DEBUG is enabled in this base settings module — confirm that
# production settings override it.
DEBUG = True

PROJECT_DIR = os.path.dirname(BASE_DIR)
MANAGERS = ADMINS
TIME_ZONE = 'Europe/Amsterdam'
LOCALE_PATHS = [BASE_DIR+'conf/locale']
# in the database, SITE_ID 1 is example.com
SITE_ID = 2
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Media (uploaded files) locations.
MEDIA_ROOT = WRITABLE_FOLDER
MEDIA_URL = PREFIX_URL+'/media/'
MEDIA_MOBILE_URL = MEDIA_URL

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = PREFIX_URL

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = PREFIX_URL+'/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, "media"),
)

# STATICFILES_STORAGE = ( os.path.join(PROJECT_DIR, "static"), )

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): this SECRET_KEY is committed to source control — rotate it and
# load from the environment in production deployments.
SECRET_KEY = '^g=q21r_nnmbz49d!vs*2gvpll-y9b@&t3k2r3c$*u&2la5!%s'

MIDDLEWARE_CLASSES = (
    # 'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'signbank.pages.middleware.PageFallbackMiddleware',
    # 'django_mobile.middleware.MobileDetectionMiddleware',
    # 'django_mobile.middleware.SetFlavourMiddleware',
    # 'debug_toolbar.middleware.DebugToolbarMiddleware',
    'reversion.middleware.RevisionMiddleware',
)

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(PROJECT_DIR, 'templates/' + SIGNBANK_VERSION_CODE + '-templates'),
                 os.path.join(PROJECT_DIR, 'signbank/registration/templates/')],
        'OPTIONS': {
            'context_processors': [
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "signbank.context_processors.url",
                "signbank.pages.context_processors.menu",
                # "django_mobile.context_processors.flavour",
            ],
            'loaders': [
                # 'django_mobile.loader.Loader',
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]
        },
    },
]

# add the Email backend to allow logins using email as username
AUTHENTICATION_BACKENDS = (
    "signbank.registration.EmailBackend",
    "django.contrib.auth.backends.ModelBackend",
    'guardian.backends.ObjectPermissionBackend',
)

AUTH_PROFILE_MODULE = 'dictionary.UserProfile'
INTERNAL_IPS = ('127.0.0.1','131.174.132.138')
ROOT_URLCONF = 'signbank.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'signbank.wsgi.application'

INSTALLED_APPS = (
    'modeltranslation',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.staticfiles',
    'bootstrap3',
    'django_summernote',
    # 'django_select2',
    # 'easy_select2',
    'signbank.dictionary',
    'signbank.feedback',
    #'signbank.registration',
    'signbank.pages',
    'signbank.attachments',
    'signbank.video',
    'reversion',
    #'django_mobile',
    'tagging',
    'guardian',
    #'debug_toolbar'
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# turn on lots of logging or not
DO_LOGGING = False
LOG_FILENAME = "debug.log"

SOUTH_TESTS_MIGRATE = False

## Application settings for signbank

## Settings controlling page contents

# do we implement safe search for anonymous users?
# if True, any gloss that is tagged lexis:crude will be removed from
# search results for users who are not logged in
ANON_SAFE_SEARCH = False

# do we show the tag based search for anonymous users?
ANON_TAG_SEARCH = False

# do we display the previous/next links to signs, requires gloss.sn to be used consistently
SIGN_NAVIGATION = False

# which definition fields do we show and in what order?
DEFINITION_FIELDS = ['general', 'noun', 'verb', 'interact', 'deictic', 'modifier', 'question', 'augment', 'note']

HANDSHAPE_RESULT_FIELDS = ['machine_value', 'english_name', 'dutch_name', 'chinese_name',
                           'hsFingSel', 'hsFingConf', 'hsFingSel2', 'hsFingConf2', 'hsFingUnsel', 'hsSpread', 'hsAperture']

# location and URL for uploaded files
UPLOAD_ROOT = MEDIA_ROOT + "upload/"
UPLOAD_URL = MEDIA_URL + "upload/"

# Location for comment videos relative to MEDIA_ROOT
COMMENT_VIDEO_LOCATION = "comments"

# Location for videos associated with pages
PAGES_VIDEO_LOCATION = 'pages'

# location for upload of videos relative to MEDIA_ROOT
# videos are stored here prior to copying over to the main
# storage location
VIDEO_UPLOAD_LOCATION = "upload"

# path to store uploaded attachments relative to MEDIA_ROOT
ATTACHMENT_LOCATION = 'attachments'

# which fields from the Gloss model should be included in the quick update form on the sign view
QUICK_UPDATE_GLOSS_FIELDS = ['signlanguage', 'dialect']

# should we always require a login for viewing dictionary content
ALWAYS_REQUIRE_LOGIN = True

# do we allow people to register for the site
ALLOW_REGISTRATION = True

ACCOUNT_ACTIVATION_DAYS = 7

# show the number signs page or an under construction page?
SHOW_NUMBERSIGNS = True

LOGIN_URL = PREFIX_URL+'/accounts/login/'
LOGIN_REDIRECT_URL = PREFIX_URL+'/signs/recently_added/'

# location of ffmpeg, used to convert uploaded videos
# FFMPEG_PROGRAM = "/Applications/ffmpegX.app/Contents/Resources/ffmpeg"
FFMPEG_TIMEOUT = 60
FFMPEG_OPTIONS = ["-vcodec", "h264", "-an"]

# defines the aspect ratio for videos
VIDEO_ASPECT_RATIO = 3.0/4.0

# settings for django-tagging
FORCE_LOWERCASE_TAGS = False

PRIMARY_CSS = "css/"+SIGNBANK_VERSION_CODE+"/main.css"

import mimetypes
mimetypes.add_type("video/mp4", ".mov", True)

# a list of tags we're allowed to use
XALLOWED_TAGS = [ '',
                  'workflow:needs video',
                  'workflow:redo video',
                  'workflow:problematic',
                  'corpus:attested',
                  'lexis:doubtlex',
                  'phonology:alternating',
                  'phonology:dominant hand only',
                  'phonology:double handed',
                  'phonology:forearm rotation',
                  'phonology:handshape change',
                  'phonology:onehand',
                  'phonology:parallel',
                  'phonology:symmetrical',
                  'phonology:two handed',
                  ]

TEST_RUNNER = 'django.test.runner.DiscoverRunner'

EARLIEST_GLOSS_CREATION_DATE = datetime(2015,1,1)
SUPPORTED_CITATION_IMAGE_EXTENSIONS = ['.jpg','.jpeg','.png']
MAXIMUM_UPLOAD_SIZE = 5000000
MINIMUM_OVERLAP_BETWEEN_SIGNING_HANDS_IN_CNGT = 40
DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES = 200
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
DATA_UPLOAD_MAX_MEMORY_SIZE = None
| 31.020833
| 123
| 0.694426
| 1,015
| 8,934
| 5.95468
| 0.42266
| 0.036565
| 0.020847
| 0.030774
| 0.041363
| 0.007942
| 0
| 0
| 0
| 0
| 0
| 0.010481
| 0.199015
| 8,934
| 288
| 124
| 31.020833
| 0.834125
| 0.312066
| 0
| 0.011976
| 0
| 0.005988
| 0.443733
| 0.267784
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023952
| 0
| 0.023952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e66c3efb17fe57a58924ade4ac24258abd570c92
| 50,042
|
py
|
Python
|
ocs_ci/ocs/cluster.py
|
crombus/ocs-ci
|
20340365882bdd06ddb6cd65bbd7df0ba7e2c2d8
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/cluster.py
|
crombus/ocs-ci
|
20340365882bdd06ddb6cd65bbd7df0ba7e2c2d8
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/cluster.py
|
crombus/ocs-ci
|
20340365882bdd06ddb6cd65bbd7df0ba7e2c2d8
|
[
"MIT"
] | null | null | null |
"""
A module for all rook functionalities and abstractions.
This module has rook related classes, support for functionalities to work with
rook cluster. This works with assumptions that an OCP cluster is already
functional and proper configurations are made for interaction.
"""
import base64
import logging
import random
import re
import threading
import yaml
import time
import ocs_ci.ocs.resources.pod as pod
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import ocs, storage_cluster
import ocs_ci.ocs.constants as constant
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
run_cmd,
convert_device_size,
get_trim_mean,
)
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.framework import config
from ocs_ci.ocs import ocp, constants, exceptions
from ocs_ci.ocs.exceptions import PoolNotFound
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs
logger = logging.getLogger(__name__)
class CephCluster(object):
"""
Handles all cluster related operations from ceph perspective
This class has depiction of ceph cluster. Contains references to
pod objects which represents ceph cluster entities.
Attributes:
pods (list) : A list of ceph cluster related pods
cluster_name (str): Name of ceph cluster
namespace (str): openshift Namespace where this cluster lives
"""
def __init__(self):
    """
    Cluster object initializer, this object needs to be initialized
    after cluster deployment. However its harmless to do anywhere.
    """
    # cluster_name is name of cluster in rook of type CephCluster
    # OCP wrappers for the resource kinds this class queries.
    self.POD = ocp.OCP(kind="Pod", namespace=config.ENV_DATA["cluster_namespace"])
    self.CEPHCLUSTER = ocp.OCP(
        kind="CephCluster", namespace=config.ENV_DATA["cluster_namespace"]
    )
    self.CEPHFS = ocp.OCP(
        kind="CephFilesystem", namespace=config.ENV_DATA["cluster_namespace"]
    )
    self.DEP = ocp.OCP(
        kind="Deployment", namespace=config.ENV_DATA["cluster_namespace"]
    )
    self.cluster_resource_config = self.CEPHCLUSTER.get().get("items")[0]
    # A CephFilesystem may not exist yet; tolerate its absence.
    try:
        self.cephfs_config = self.CEPHFS.get().get("items")[0]
    except IndexError as e:
        logging.warning(e)
        logging.warning("No CephFS found")
        self.cephfs_config = None
    self._cluster_name = self.cluster_resource_config.get("metadata").get("name")
    self._namespace = self.cluster_resource_config.get("metadata").get("namespace")
    # We are not invoking ocs.create() here
    # assuming cluster creation is done somewhere after deployment
    # So just load ocs with existing cluster details
    self.cluster = ocs.OCS(**self.cluster_resource_config)
    if self.cephfs_config:
        self.cephfs = ocs.OCS(**self.cephfs_config)
    else:
        self.cephfs = None
    # Label selectors used to find each kind of ceph daemon pod.
    self.mon_selector = constant.MON_APP_LABEL
    self.mds_selector = constant.MDS_APP_LABEL
    self.tool_selector = constant.TOOL_APP_LABEL
    self.mgr_selector = constant.MGR_APP_LABEL
    self.osd_selector = constant.OSD_APP_LABEL
    self.noobaa_selector = constant.NOOBAA_APP_LABEL
    self.noobaa_core_selector = constant.NOOBAA_CORE_POD_LABEL
    # Pod lists and counts are populated by scan_cluster() below.
    self.mons = []
    self._ceph_pods = []
    self.mdss = []
    self.mgrs = []
    self.osds = []
    self.noobaas = []
    self.rgws = []
    self.toolbox = None
    self.mds_count = 0
    self.mon_count = 0
    self.mgr_count = 0
    self.osd_count = 0
    self.noobaa_count = 0
    self.rgw_count = 0
    # MCG object is built lazily via the mcg_obj property.
    self._mcg_obj = None
    self.scan_cluster()
    logging.info(f"Number of mons = {self.mon_count}")
    logging.info(f"Number of mds = {self.mds_count}")
    self.used_space = 0
@property
def mcg_obj(self):
    """MCG helper object, created lazily on first access and then cached."""
    self._mcg_obj = self._mcg_obj or MCG()
    return self._mcg_obj
@property
def cluster_name(self):
return self._cluster_name
@property
def namespace(self):
return self._namespace
@property
def pods(self):
return self._ceph_pods
def scan_cluster(self):
"""
Get accurate info on current state of pods
"""
self._ceph_pods = pod.get_all_pods(self._namespace)
# TODO: Workaround for BZ1748325:
mons = pod.get_mon_pods(self.mon_selector, self.namespace)
for mon in mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
self.mons.append(mon)
# TODO: End of workaround for BZ1748325
self.mdss = pod.get_mds_pods(self.mds_selector, self.namespace)
self.mgrs = pod.get_mgr_pods(self.mgr_selector, self.namespace)
self.osds = pod.get_osd_pods(self.osd_selector, self.namespace)
self.noobaas = pod.get_noobaa_pods(self.noobaa_selector, self.namespace)
self.rgws = pod.get_rgw_pods()
self.toolbox = pod.get_ceph_tools_pod()
# set port attrib on mon pods
self.mons = list(map(self.set_port, self.mons))
self.cluster.reload()
if self.cephfs:
self.cephfs.reload()
else:
try:
self.cephfs_config = self.CEPHFS.get().get("items")[0]
self.cephfs = ocs.OCS(**self.cephfs_config)
self.cephfs.reload()
except IndexError as e:
logging.warning(e)
logging.warning("No CephFS found")
self.mon_count = len(self.mons)
self.mds_count = len(self.mdss)
self.mgr_count = len(self.mgrs)
self.osd_count = len(self.osds)
self.noobaa_count = len(self.noobaas)
self.rgw_count = len(self.rgws)
@staticmethod
def set_port(pod):
"""
Set port attribute on pod.
port attribute for mon is required for secrets and this attrib
is not a member for original pod class.
Args:
pod(Pod): Pod object without 'port' attribute
Returns:
pod(Pod): A modified pod object with 'port' attribute set
"""
container = pod.pod_data.get("spec").get("containers")
port = container[0]["ports"][0]["containerPort"]
# Dynamically added attribute 'port'
pod.port = port
logging.info(f"port={pod.port}")
return pod
def is_health_ok(self):
"""
Returns:
bool: True if "HEALTH_OK" else False
"""
self.cluster.reload()
return self.cluster.data["status"]["ceph"]["health"] == "HEALTH_OK"
    def cluster_health_check(self, timeout=None):
        """
        Check overall cluster health.
        Relying on health reported by CephCluster.get()

        Args:
            timeout (int): in seconds. By default timeout value will be scaled
                based on number of ceph pods in the cluster. This is just a
                crude number. Its been observed that as the number of pods
                increases it takes more time for cluster's HEALTH_OK.

        Returns:
            bool: True if "HEALTH_OK" else False

        Raises:
            CephHealthException: if cluster is not healthy
        """
        # Scale timeout only if user hasn't passed any value
        timeout = timeout or (10 * len(self.pods))
        sample = TimeoutSampler(timeout=timeout, sleep=3, func=self.is_health_ok)
        if not sample.wait_for_func_status(result=True):
            raise exceptions.CephHealthException("Cluster health is NOT OK")
        # This way of checking health of different cluster entities and
        # raising only CephHealthException is not elegant.
        # TODO: add an attribute in CephHealthException, called "reason"
        # which should tell because of which exact cluster entity health
        # is not ok ?
        # Snapshot expected counts before re-scanning, since scan_cluster()
        # overwrites mon_count/mds_count with the currently observed values.
        expected_mon_count = self.mon_count
        expected_mds_count = self.mds_count
        self.scan_cluster()
        try:
            self.mon_health_check(expected_mon_count)
        except exceptions.MonCountException as e:
            logger.error(e)
            raise exceptions.CephHealthException("Cluster health is NOT OK")
        try:
            # Skip the MDS check entirely when no MDS pods are expected
            # (i.e. no CephFS deployed).
            if not expected_mds_count:
                pass
            else:
                self.mds_health_check(expected_mds_count)
        except exceptions.MDSCountException as e:
            logger.error(e)
            raise exceptions.CephHealthException("Cluster health is NOT OK")
        # TODO: OSD and MGR health check
        logger.info("Cluster HEALTH_OK")
        # This scan is for reconcilation on *.count
        # because during first scan in this function some of the
        # pods may not be up and would have set count to lesser number
        self.scan_cluster()
        # Check Noobaa health
        self.wait_for_noobaa_health_ok()
def noobaa_health_check(self):
"""
Check Noobaa health
"""
if not self.mcg_obj.status:
raise exceptions.NoobaaHealthException("Cluster health is NOT OK")
def wait_for_noobaa_health_ok(self, tries=60, delay=5):
"""
Wait for Noobaa health to be OK
"""
return retry(
exceptions.NoobaaHealthException, tries=tries, delay=delay, backoff=1
)(self.noobaa_health_check)()
    def mon_change_count(self, new_count):
        """
        Change mon count in the cluster

        Args:
            new_count(int): Absolute number of mons required
        """
        # Refresh the cached resource before mutating it so the patch is
        # based on the latest spec.
        self.cluster.reload()
        self.cluster.data["spec"]["mon"]["count"] = new_count
        logger.info(self.cluster.data)
        self.cluster.apply(**self.cluster.data)
        self.mon_count = new_count
        # Blocks until the cluster reports HEALTH_OK with the new mon count.
        self.cluster_health_check()
        logger.info(f"Mon count changed to {new_count}")
        self.cluster.reload()
def mon_health_check(self, count):
"""
Mon health check based on pod count
Args:
count (int): Expected number of mon pods
Raises:
MonCountException: if mon pod count doesn't match
"""
timeout = 10 * len(self.pods)
logger.info(f"Expected MONs = {count}")
try:
assert self.POD.wait_for_resource(
condition="Running",
selector=self.mon_selector,
resource_count=count,
timeout=timeout,
sleep=3,
)
# TODO: Workaround for BZ1748325:
actual_mons = pod.get_mon_pods()
actual_running_mons = list()
for mon in actual_mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
actual_running_mons.append(mon)
actual = len(actual_running_mons)
# TODO: End of workaround for BZ1748325
assert count == actual, f"Expected {count}, Got {actual}"
except exceptions.TimeoutExpiredError as e:
logger.error(e)
raise exceptions.MonCountException(
f"Failed to achieve desired Mon count" f" {count}"
)
    def mds_change_count(self, new_count):
        """
        Change mds count in the cluster

        Args:
            new_count(int): Absolute number of active mdss required
        """
        self.cephfs.data["spec"]["metadataServer"]["activeCount"] = new_count
        self.cephfs.apply(**self.cephfs.data)
        logger.info(f"MDS active count changed to {new_count}")
        # With activeStandby enabled each active MDS has a standby, so the
        # expected pod count doubles.
        if self.cephfs.data["spec"]["metadataServer"]["activeStandby"]:
            expected = new_count * 2
        else:
            expected = new_count
        self.mds_count = expected
        # Blocks until the cluster reports HEALTH_OK with the new MDS count.
        self.cluster_health_check()
        self.cephfs.reload()
def mds_health_check(self, count):
"""
MDS health check based on pod count
Args:
count (int): number of pods expected
Raises:
MDACountException: if pod count doesn't match
"""
timeout = 10 * len(self.pods)
try:
assert self.POD.wait_for_resource(
condition="Running",
selector=self.mds_selector,
resource_count=count,
timeout=timeout,
sleep=3,
)
except AssertionError as e:
logger.error(e)
raise exceptions.MDSCountException(
f"Failed to achieve desired MDS count" f" {count}"
)
def get_admin_key(self):
"""
Returns:
adminkey (str): base64 encoded key
"""
return self.get_user_key("client.admin")
def set_noout(self):
"""
Set noout flag for maintainance
"""
self.toolbox.exec_cmd_on_pod("ceph osd set noout")
def unset_noout(self):
"""
unset noout flag for peering
"""
self.toolbox.exec_cmd_on_pod("ceph osd unset noout")
def get_user_key(self, user):
"""
Args:
user (str): ceph username ex: client.user1
Returns:
key (str): base64 encoded user key
"""
out = self.toolbox.exec_cmd_on_pod(f"ceph auth get-key {user} --format json")
if "ENOENT" in out:
return False
key_base64 = base64.b64encode(out["key"].encode()).decode()
return key_base64
def create_user(self, username, caps):
"""
Create a ceph user in the cluster
Args:
username (str): ex client.user1
caps (str): ceph caps ex: mon 'allow r' osd 'allow rw'
Return:
return value of get_user_key()
"""
cmd = f"ceph auth add {username} {caps}"
# As of now ceph auth command gives output to stderr
# To be handled
out = self.toolbox.exec_cmd_on_pod(cmd)
logging.info(type(out))
return self.get_user_key(username)
def get_mons_from_cluster(self):
"""
Getting the list of mons from the cluster
Returns:
available_mon (list): Returns the mons from the cluster
"""
ret = self.DEP.get(
resource_name="", out_yaml_format=False, selector="app=rook-ceph-mon"
)
available_mon = re.findall(r"[\w-]+mon-+[\w-]", ret)
return available_mon
def remove_mon_from_cluster(self):
"""
Removing the mon pod from deployment
Returns:
remove_mon(bool): True if removal of mon is successful, False otherwise
"""
mons = self.get_mons_from_cluster()
after_delete_mon_count = len(mons) - 1
random_mon = random.choice(mons)
remove_mon = self.DEP.delete(resource_name=random_mon)
assert self.POD.wait_for_resource(
condition=constant.STATUS_RUNNING,
resource_count=after_delete_mon_count,
selector="app=rook-ceph-mon",
)
logging.info(f"Removed the mon {random_mon} from the cluster")
return remove_mon
    @retry(UnexpectedBehaviour, tries=20, delay=10, backoff=1)
    def check_ceph_pool_used_space(self, cbp_name):
        """
        Check for the used space of a pool in cluster

        Args:
            cbp_name (str): Name of the ceph block pool to query

        Returns:
            used_in_gb (float): Amount of used space in pool (in GBs)

        Raises:
            UnexpectedBehaviour: If used size keeps varying in Ceph status
        """
        ct_pod = pod.get_ceph_tools_pod()
        rados_status = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
        assert rados_status is not None
        used = rados_status["pools"][0]["size_bytes"]
        # format() returns a *string* with 4 decimal places; the string (not
        # a float) is what gets cached and compared below.
        used_in_gb = format(used / constants.GB, ".4f")
        # The retry decorator re-invokes this method until two consecutive
        # samples report the same used size, i.e. until pool usage is stable.
        if self.used_space and self.used_space == used_in_gb:
            return float(self.used_space)
        self.used_space = used_in_gb
        raise UnexpectedBehaviour("In Rados df, Used size is varying")
def get_ceph_health(self, detail=False):
"""
Exec `ceph health` cmd on tools pod and return the status of the ceph
cluster.
Args:
detail (bool): If True the 'ceph health detail' is executed
Returns:
str: Output of the ceph health command.
"""
ceph_health_cmd = "ceph health"
if detail:
ceph_health_cmd = f"{ceph_health_cmd} detail"
return self.toolbox.exec_cmd_on_pod(
ceph_health_cmd,
out_yaml_format=False,
)
def get_ceph_status(self, format=None):
"""
Exec `ceph status` cmd on tools pod and return its output.
Args:
format (str) : Format of the output (e.g. json-pretty, json, plain)
Returns:
str: Output of the ceph status command.
"""
cmd = "ceph status"
if format:
cmd += f" -f {format}"
return self.toolbox.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_ceph_capacity(self):
"""
The function gets the total mount of storage capacity of the ocs cluster.
the calculation is <Num of OSD> * <OSD size> / <replica number>
it will not take into account the current used capacity.
Returns:
int : Total storage capacity in GiB (GiB is for development environment)
"""
storage_cluster_obj = storage_cluster.StorageCluster(
resource_name=config.ENV_DATA["storage_cluster_name"],
namespace=config.ENV_DATA["cluster_namespace"],
)
replica = int(
storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["replica"]
)
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph df")
usable_capacity = (
int(ceph_status["stats"]["total_bytes"]) / replica / constant.GB
)
return usable_capacity
def get_ceph_cluster_iops(self):
"""
The function gets the IOPS from the ocs cluster
Returns:
Total IOPS in the cluster
"""
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph status")
read_ops = ceph_status["pgmap"]["read_op_per_sec"]
write_ops = ceph_status["pgmap"]["write_op_per_sec"]
cluster_iops = read_ops + write_ops
return cluster_iops
def get_iops_percentage(self, osd_size=2):
"""
The function calculates the IOPS percentage
of the cluster depending on number of osds in the cluster
Args:
osd_size (int): Size of 1 OSD in Ti
Returns:
IOPS percentage of the OCS cluster
"""
osd_count = count_cluster_osd()
iops_per_osd = osd_size * constants.IOPS_FOR_1TiB_OSD
iops_in_cluster = self.get_ceph_cluster_iops()
osd_iops_limit = iops_per_osd * osd_count
iops_percentage = (iops_in_cluster / osd_iops_limit) * 100
logging.info(f"The IOPS percentage of the cluster is {iops_percentage}%")
return iops_percentage
    def get_cluster_throughput(self):
        """
        Function to get the throughput of ocs cluster

        Returns:
            float: The write throughput of the cluster in MiB/s
        """
        ceph_status = self.get_ceph_status()
        # Find the "client:" line of plain-text `ceph status` output, which
        # carries the client IO rates.
        for item in ceph_status.split("\n"):
            if "client" in item:
                # NOTE(review): str.strip() removes a *character set*, not a
                # literal prefix; "client: " happens to work for the expected
                # line but would over-strip other leading/trailing chars.
                throughput_data = item.strip("client: ").split(",")
                # Keep only the first two entries (read and write rates).
                throughput_data = throughput_data[:2:1]
                # Converting all B/s and KiB/s to MiB/s
                throughput = 0
                for val in throughput_data:
                    # Pick the conversion factor whose unit suffix appears in
                    # the sample (keys of TP_CONVERSION) and scale to MiB/s.
                    throughput += [
                        float(re.findall(r"\d+", val)[0]) * constants.TP_CONVERSION[key]
                        for key in constants.TP_CONVERSION.keys()
                        if key in val
                    ][0]
                    logger.info(
                        f"The {val[-2:].upper()} throughput is {throughput} MiB/s"
                    )
                return throughput
def get_throughput_percentage(self):
"""
Function to get throughput percentage of the ocs cluster
Returns:
Throughput percentage of the cluster
"""
throughput_of_cluster = self.get_cluster_throughput()
throughput_percentage = (
throughput_of_cluster / constants.THROUGHPUT_LIMIT_OSD
) * 100
logging.info(
f"The throughput percentage of the cluster is {throughput_percentage}%"
)
return throughput_percentage
def calc_trim_mean_throughput(self, samples=8):
"""
Calculate the cluster average throughput out of a few samples
Args:
samples (int): The number of samples to take
Returns:
float: The average cluster throughput
"""
throughput_vals = [self.get_cluster_throughput() for _ in range(samples)]
return round(get_trim_mean(throughput_vals), 3)
def get_rebalance_status(self):
"""
This function gets the rebalance status
Returns:
bool: True if rebalance is completed, False otherwise
"""
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph status")
ceph_health = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph health")
total_pg_count = ceph_status["pgmap"]["num_pgs"]
pg_states = ceph_status["pgmap"]["pgs_by_state"]
logger.info(ceph_health)
logger.info(pg_states)
for states in pg_states:
return (
states["state_name"] == "active+clean"
and states["count"] == total_pg_count
)
def wait_for_rebalance(self, timeout=600):
"""
Wait for re-balance to complete
Args:
timeout (int): Time to wait for the completion of re-balance
Returns:
bool: True if rebalance completed, False otherwise
"""
try:
for rebalance in TimeoutSampler(
timeout=timeout, sleep=10, func=self.get_rebalance_status
):
if rebalance:
logging.info("Re-balance is completed")
return True
except exceptions.TimeoutExpiredError:
logger.error(
f"Data re-balance failed to complete within the given "
f"timeout of {timeout} seconds"
)
return False
def time_taken_to_complete_rebalance(self, timeout=600):
"""
This function calculates the time taken to complete
rebalance
Args:
timeout (int): Time to wait for the completion of rebalance
Returns:
int : Time taken in minutes for the completion of rebalance
"""
start_time = time.time()
assert self.wait_for_rebalance(timeout=timeout), (
f"Data re-balance failed to complete within the given "
f"timeout of {timeout} seconds"
)
time_taken = time.time() - start_time
return time_taken / 60
class CephHealthMonitor(threading.Thread):
    """
    Context manager class for monitoring ceph health status of CephCluster.
    If CephCluster will get to HEALTH_ERROR state it will save the ceph status
    to health_error_status variable and will stop monitoring.
    """

    def __init__(self, ceph_cluster, sleep=5):
        """
        Constructor for ceph health status thread.

        Args:
            ceph_cluster (CephCluster): Reference to CephCluster object.
            sleep (int): Number of seconds to sleep between health checks.
        """
        self.ceph_cluster = ceph_cluster
        self.sleep = sleep
        # Ceph status captured when HEALTH_ERROR is first seen (None until then).
        self.health_error_status = None
        self.health_monitor_enabled = False
        self.latest_health_status = None
        super(CephHealthMonitor, self).__init__()

    def run(self):
        """Poll ceph health until disabled or HEALTH_ERROR is detected."""
        self.health_monitor_enabled = True
        while self.health_monitor_enabled and (not self.health_error_status):
            time.sleep(self.sleep)
            self.latest_health_status = self.ceph_cluster.get_ceph_health(detail=True)
            if "HEALTH_ERROR" in self.latest_health_status:
                self.health_error_status = self.ceph_cluster.get_ceph_status()
                self.log_error_status()

    def __enter__(self):
        self.start()
        # Return the monitor so `with CephHealthMonitor(...) as monitor:`
        # binds the monitor object (previously None was bound).
        return self

    def __exit__(self, exception_type, value, traceback):
        """
        Exit method for context manager

        Raises:
            CephHealthException: If no other exception occurred during
                execution of context manager and HEALTH_ERROR is detected
                during the monitoring.
            exception_type: In case of exception raised during processing of
                the context manager.
        """
        self.health_monitor_enabled = False
        if self.health_error_status:
            self.log_error_status()
        if exception_type:
            # Re-raise the in-flight exception with its original traceback.
            # (Equivalent to the previous unbound-method form
            # `exception_type.with_traceback(value, traceback)`, but clearer.)
            raise value.with_traceback(traceback)
        if self.health_error_status:
            raise exceptions.CephHealthException(
                f"During monitoring of Ceph health status hit HEALTH_ERROR: "
                f"{self.health_error_status}"
            )
        return True

    def log_error_status(self):
        """Log the captured HEALTH_ERROR ceph status."""
        logger.error(
            f"ERROR HEALTH STATUS DETECTED! " f"Status: {self.health_error_status}"
        )
def validate_ocs_pods_on_pvc(pods, pvc_names):
    """
    Validate if ocs pod has PVC. This validation checking if there is the pvc
    like: rook-ceph-mon-a for the pod rook-ceph-mon-a-56f67f5968-6j4px.

    Args:
        pods (list): OCS pod names
        pvc_names (list): names of all PVCs

    Raises:
        AssertionError: If no PVC found for one of the pod
    """
    logger.info(f"Validating if each pod from: {pods} has PVC from {pvc_names}.")
    for pod_name in pods:
        # Keep the *last* PVC whose name is a substring of the pod name
        # (pod names embed the backing PVC name plus a replica-set suffix).
        matches = [pvc for pvc in pvc_names if pvc in pod_name]
        found_pvc = matches[-1] if matches else ""
        if found_pvc:
            logger.info(f"PVC {found_pvc} found for pod {pod_name}")
            continue
        assert found_pvc, f"No PVC found for pod: {pod_name}!"
def validate_cluster_on_pvc():
    """
    Validate creation of PVCs for MON and OSD pods.
    Also validate that those PVCs are attached to the OCS pods

    Raises:
        AssertionError: If PVC is not mounted on one or more OCS pods
    """
    # Get the PVCs for selected label (MON/OSD)
    ns = config.ENV_DATA["cluster_namespace"]
    ocs_pvc_obj = get_all_pvc_objs(namespace=ns)
    # Check all pvc's are in bound state
    # Only deviceset (OSD) and mon PVCs are relevant here; everything else
    # in the namespace is ignored.
    pvc_names = []
    for pvc_obj in ocs_pvc_obj:
        if pvc_obj.name.startswith(
            constants.DEFAULT_DEVICESET_PVC_NAME
        ) or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME):
            assert (
                pvc_obj.status == constants.STATUS_BOUND
            ), f"PVC {pvc_obj.name} is not Bound"
            logger.info(f"PVC {pvc_obj.name} is in Bound state")
            pvc_names.append(pvc_obj.name)
    mon_pods = get_pod_name_by_pattern("rook-ceph-mon", ns)
    if not config.DEPLOYMENT.get("local_storage"):
        logger.info("Validating all mon pods have PVC")
        validate_ocs_pods_on_pvc(mon_pods, pvc_names)
    else:
        logger.debug(
            "Skipping validation if all mon pods have PVC because in LSO "
            "deployment we don't have mon pods backed by PVC"
        )
    logger.info("Validating all osd pods have PVC")
    osd_deviceset_pods = get_pod_name_by_pattern(
        "rook-ceph-osd-prepare-ocs-deviceset", ns
    )
    validate_ocs_pods_on_pvc(osd_deviceset_pods, pvc_names)
    # NOTE(review): filter="prepare" presumably *excludes* the osd-prepare
    # pods from this pattern match — confirm against get_pod_name_by_pattern.
    osd_pods = get_pod_name_by_pattern("rook-ceph-osd", ns, filter="prepare")
    # Cross-check each mon/osd pod's mounted PVC claims against the Bound
    # PVC names collected above.
    for ceph_pod in mon_pods + osd_pods:
        out = run_cmd(f"oc -n {ns} get pods {ceph_pod} -o yaml")
        out_yaml = yaml.safe_load(out)
        for vol in out_yaml["spec"]["volumes"]:
            if vol.get("persistentVolumeClaim"):
                claimName = vol.get("persistentVolumeClaim").get("claimName")
                logger.info(f"{ceph_pod} backed by pvc {claimName}")
                assert claimName in pvc_names, "Ceph Internal Volume not backed by PVC"
def count_cluster_osd():
    """
    The function returns the number of cluster OSDs

    Returns:
        osd_count (int): number of OSD pods in current cluster
    """
    storage_cluster_obj = storage_cluster.StorageCluster(
        resource_name=config.ENV_DATA["storage_cluster_name"],
        namespace=config.ENV_DATA["cluster_namespace"],
    )
    storage_cluster_obj.reload_data()
    # OSD count = device set count * replica factor.
    device_set = storage_cluster_obj.data["spec"]["storageDeviceSets"][0]
    return int(device_set["count"]) * int(device_set["replica"])
def validate_pdb_creation():
    """
    Validate creation of PDBs for MON, MDS and OSD pods.

    Raises:
        AssertionError: If required PDBs were not created.
    """
    pdb_obj = ocp.OCP(kind="PodDisruptionBudget")
    item_list = pdb_obj.get().get("items")
    pdb_list = [item["metadata"]["name"] for item in item_list]
    osd_count = count_cluster_osd()
    pdb_required = [constants.MDS_PDB, constants.MON_PDB]
    for num in range(osd_count):
        pdb_required.append(constants.OSD_PDB + str(num))
    # Check membership instead of zipping two sorted lists: zip() silently
    # truncates to the shorter list, so a missing PDB could go undetected
    # whenever the cluster has more or fewer PDBs than required.
    for required in pdb_required:
        assert required in pdb_list, f"{required} was not created"
    logger.info(f"All required PDBs created: {pdb_required}")
def get_osd_utilization():
    """
    Get osd utilization value

    Returns:
        osd_filled (dict): Dict of osd name and its used value
        i.e {'osd.1': 15.276289408185841, 'osd.0': 15.276289408185841, 'osd.2': 15.276289408185841}
    """
    ct_pod = pod.get_ceph_tools_pod()
    output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd df")
    return {osd["name"]: osd["utilization"] for osd in output.get("nodes")}
def get_ceph_df_detail():
    """
    Get ceph osd df detail

    Returns:
        dict: 'ceph df details' command output
    """
    tools_pod = pod.get_ceph_tools_pod()
    return tools_pod.exec_ceph_cmd(ceph_cmd="ceph df detail")
def validate_replica_data(pool_name, replica):
    """
    Check if data is replica 2 or 3

    Args:
        replica (int): size of the replica(2,3)
        pool_name (str): name of the pool to check replica

    Returns:
        Bool: True if replicated data size is meet rep config and False if dont

    Raises:
        PoolNotFound: If pool_name does not appear in `ceph df detail` output
    """
    ceph_df_detail_output = get_ceph_df_detail()
    pool_list = ceph_df_detail_output.get("pools")
    for pool in pool_list:
        if pool.get("name") == pool_name:
            logger.info(f"{pool_name}")
            stored = pool["stats"]["stored"]
            byte_used = pool["stats"]["bytes_used"]
            compress_bytes_used = pool["stats"]["compress_bytes_used"]
            compress_under_bytes = pool["stats"]["compress_under_bytes"]
            # Undo compression savings so the ratio reflects raw replication.
            byte_used = byte_used + compress_under_bytes - compress_bytes_used
            store_ratio = byte_used / stored
            # Allow a +/- 0.2 tolerance around the expected replica factor.
            if (replica + 0.2) > store_ratio > (replica - 0.2):
                logger.info(f"pool {pool_name} meet rep {replica} size")
                return True
            else:
                # Fixed previously garbled log message ("meet do not meet").
                logger.info(
                    f"pool {pool_name} does not meet rep {replica}"
                    f" size Store ratio is {store_ratio}"
                )
                return False
    raise PoolNotFound(f"Pool {pool_name} not found on cluster")
def validate_compression(pool_name):
    """
    Check if data was compressed

    Args:
        pool_name (str): name of the pool to check replica

    Returns:
        bool: True if compression works. False if not
    """
    pools = get_ceph_df_detail().get("pools")
    for pool_info in pools:
        if pool_info.get("name") != pool_name:
            continue
        logger.info(f"{pool_name}")
        stats = pool_info["stats"]
        byte_used = stats["bytes_used"]
        compress_bytes_used = stats["compress_bytes_used"]
        compress_under_bytes = stats["compress_under_bytes"]
        # Undo compression savings to estimate the uncompressed footprint.
        all_byte_used = byte_used + compress_under_bytes - compress_bytes_used
        compression_ratio = byte_used / all_byte_used
        logger.info(f"this is the comp_ratio {compression_ratio}")
        if compression_ratio > 0.6:
            logger.info(
                f"Compression ratio {compression_ratio} is " f"larger than 0.6"
            )
            return True
        logger.info(
            f"Compression ratio {compression_ratio} is " f"smaller than 0.6"
        )
        return False
    raise PoolNotFound(f"Pool {pool_name} not found on cluster")
def validate_osd_utilization(osd_used=80):
    """
    Validates osd utilization matches osd_used value

    Args:
        osd_used (int): osd used value

    Returns:
        bool: True if all osd values is equal or greater to osd_used.
            False Otherwise.
    """
    all_reached_threshold = True
    for osd, value in get_osd_utilization().items():
        if int(value) < osd_used:
            all_reached_threshold = False
            logger.warning(f"{osd} used value {value}")
        else:
            logger.info(f"{osd} used value {value}")
    return all_reached_threshold
def get_pgs_per_osd():
    """
    Function to get ceph pg count per OSD

    Returns:
        osd_dict (dict): Dict of osd name and its used value
        i.e {'osd.0': 136, 'osd.2': 136, 'osd.1': 136}
    """
    ct_pod = pod.get_ceph_tools_pod()
    output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd df")
    return {osd["name"]: osd["pgs"] for osd in output.get("nodes")}
def get_balancer_eval():
    """
    Function to get ceph pg balancer eval value

    Returns:
        eval_out (float): Eval output of pg balancer
    """
    ct_pod = pod.get_ceph_tools_pod()
    # The score is taken from the 4th whitespace-separated token of the
    # command output — presumably "current cluster score <float> ...";
    # confirm against the `ceph balancer eval` output format.
    tokens = ct_pod.exec_ceph_cmd(ceph_cmd="ceph balancer eval").split(" ")
    return float(tokens[3])
def get_pg_balancer_status():
    """
    Function to check pg_balancer active and mode is upmap

    Returns:
        bool: True if active and upmap is set else False
    """
    # Check either PG balancer is active or not
    ct_pod = pod.get_ceph_tools_pod()
    status = ct_pod.exec_ceph_cmd(ceph_cmd="ceph balancer status")
    # Check 'mode' is 'upmap', based on suggestion from Ceph QE
    # TODO: Revisit this if mode needs change.
    if not (status["active"] and status["mode"] == "upmap"):
        logging.error("PG balancer is not active")
        return False
    logging.info("PG balancer is active and mode is upmap")
    return True
def validate_pg_balancer():
    """
    Validate either data is equally distributed to OSDs

    Returns:
        bool: True if avg PG's per osd difference is <=10 else False.
        (Returns None when the PG balancer is not active — preserved for
        backward compatibility; treat the result as truthy/falsy.)
    """
    # Check OSD utilization either pg balancer is active
    # TODO: Revisit this if pg difference value needs change
    # TODO: Revisit eval value if pg balancer mode changes from 'upmap'
    if get_pg_balancer_status():
        # Renamed local from `eval` to avoid shadowing the builtin.
        balancer_eval = get_balancer_eval()
        osd_dict = get_pgs_per_osd()
        osd_avg_pg_value = round(sum(osd_dict.values()) / len(osd_dict))
        osd_pg_value_flag = True
        for key, value in osd_dict.items():
            diff = abs(value - osd_avg_pg_value)
            if diff <= 10:
                logging.info(f"{key} PG difference {diff} is acceptable")
            else:
                logging.error(f"{key} PG difference {diff} is not acceptable")
                osd_pg_value_flag = False
        if osd_pg_value_flag and balancer_eval <= 0.025:
            logging.info(
                f"Eval value is {balancer_eval} and pg distribution "
                f"average difference is <=10 which is acceptable"
            )
            return True
        else:
            logging.error(
                f"Eval value is {balancer_eval} and pg distribution "
                f"average difference is >=10 which is high and not acceptable"
            )
            return False
    else:
        logging.info("pg_balancer is not active")
def get_percent_used_capacity():
    """
    Function to calculate the percentage of used capacity in a cluster

    Returns:
        float: The percentage of the used capacity in the cluster
    """
    ct_pod = pod.get_ceph_tools_pod()
    stats = ct_pod.exec_ceph_cmd(ceph_cmd="ceph df").get("stats")
    used_bytes = stats.get("total_used_raw_bytes")
    total_bytes = stats.get("total_bytes")
    return 100.0 * used_bytes / total_bytes
def get_osd_pods_memory_sum():
    """
    Get the sum of memory of all OSD pods. This is used to determine the size
    needed for a PVC so when IO will be running over it the OSDs cache will be filled

    Returns:
        int: The sum of the OSD pods memory in GB
    """
    osd_pods = pod.get_osd_pods()
    # All OSD pods share the same memory spec, so sample the first one.
    mem_size_str = osd_pods[0].get_memory().get("osd")
    per_pod_gb = convert_device_size(
        unformatted_size=mem_size_str, units_to_covert_to="GB"
    )
    return len(osd_pods) * per_pod_gb
def get_child_nodes_osd_tree(node_id, osd_tree):
    """
    This function finds the children of a node from the 'ceph osd tree' and returns them as list

    Args:
        node_id (int): the id of the node for which the children to be retrieved
        osd_tree (dict): dictionary containing the output of 'ceph osd tree'

    Returns:
        list: of 'children' of a given node_id (None when no node with that
        id exists in the tree)
    """
    # Iterate the nodes directly instead of indexing via range(len(...)).
    for node in osd_tree["nodes"]:
        if node["id"] == node_id:
            return node["children"]
def check_osds_in_hosts_osd_tree(hosts, osd_tree):
    """
    Checks if osds are formed correctly after cluster expansion

    Args:
        hosts (list) : List of hosts
        osd_tree (str) : 'ceph osd tree' command output

    Returns:
        bool : True if osd tree formatted correctly
    """
    for host in hosts:
        osds_under_host = get_child_nodes_osd_tree(host, osd_tree)
        # Each host is expected to carry exactly one osd.
        if len(osds_under_host) > 1 or len(osds_under_host) <= 0:
            logger.error(
                "Error. ceph osd tree is NOT formed correctly after cluster expansion"
            )
            return False
    logger.info("osd tree verification Passed")
    return True
def check_osd_tree_1az_vmware(osd_tree, number_of_osds):
    """
    Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
    deployment and cluster expansion tests.
    This function is specifically for ocs cluster created on 1 AZ VMWare setup

    Args:
        osd_tree (dict): Dictionary of the values which represent 'osd tree'.
        number_of_osds (int): total number of osds in the cluster

    Returns:
        bool: True, if the ceph osd tree is formed correctly. Else False
    """
    # in case of vmware, there will be only one zone as of now. The OSDs are arranged as follows:
    # ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
    # -1 0.99326 root default
    # -8 0.33109 rack rack0
    # -7 0.33109 host ocs-deviceset-0-0-dktqc
    # 1 hdd 0.33109 osd.1 up 1.00000 1.00000
    # There will be 3 racks - rack0, rack1, rack2.
    # When cluster expansion is successfully done, a host and an osd are added in each rack.
    # The number of hosts will be equal to the number osds the cluster has. Each rack can
    # have multiple hosts but each host will have only one osd under it.
    number_of_hosts_expected = int(number_of_osds / 3)
    collected_hosts = []
    for rack in osd_tree["nodes"][0]["children"]:
        rack_hosts = get_child_nodes_osd_tree(rack, osd_tree)
        if len(rack_hosts) != number_of_hosts_expected:
            logging.error(
                f"Number of hosts under rack {rack} "
                f"is not matching the expected ={number_of_hosts_expected} "
            )
            return False
        collected_hosts.extend(rack_hosts)
    return check_osds_in_hosts_osd_tree(collected_hosts, osd_tree)
def check_osd_tree_3az_aws(osd_tree, number_of_osds):
    """
    Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
    deployment and cluster expansion tests.
    This function is specifically for ocs cluster created on 3 AZ AWS config

    Args:
        osd_tree (dict): Dictionary of the values which represent 'osd tree'.
        number_of_osds (int): total number of osds in the cluster

    Returns:
        Boolean: True, if the ceph osd tree is formed correctly. Else False
    """
    region = osd_tree["nodes"][0]["children"]
    zones = get_child_nodes_osd_tree(region[0], osd_tree)
    collected_hosts = []
    for zone in zones:
        zone_hosts = get_child_nodes_osd_tree(zone, osd_tree)
        if len(zone_hosts) != number_of_osds / 3:  # 3 is replica_factor
            logger.error("number of hosts in zone is incorrect")
            return False
        collected_hosts.extend(zone_hosts)
    return check_osds_in_hosts_osd_tree(collected_hosts, osd_tree)
def check_osd_tree_1az_aws(osd_tree, number_of_osds):
    """
    Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
    deployment and cluster expansion tests.
    This function is specifically for ocs cluster created on 1 AZ AWS config

    Args:
        osd_tree (dict): Dictionary of the values which represent 'osd tree'.
        number_of_osds (int): total number of osds in the cluster

    Returns:
        Boolean: True, if the ceph osd tree is formed correctly. Else False
    """
    all_hosts = []
    # Hierarchy walked here: root -> region -> zone -> racks -> hosts.
    region = osd_tree["nodes"][0]["children"]
    zones = get_child_nodes_osd_tree(region[0], osd_tree)
    racks = get_child_nodes_osd_tree(zones[0], osd_tree)
    logging.info(f"racks = {racks}")
    # NOTE(review): a rack count other than 3 is only logged, not treated as
    # a failure (no `return False` here) — confirm whether that is intended.
    if len(racks) != 3:
        logging.error(f"Expected 3 racks but got {len(racks)}")
    for each_rack in racks:
        hosts_in_each_rack = get_child_nodes_osd_tree(each_rack, osd_tree)
        if len(hosts_in_each_rack) != number_of_osds / 3:  # 3 is replica_factor
            logging.error("number of hosts in rack is incorrect")
            return False
        else:
            logging.info(f"adding host...{hosts_in_each_rack}")
            all_hosts.append(hosts_in_each_rack)
    all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
    return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osds_in_hosts_are_up(osd_tree):
    """
    Check if all the OSD's in status 'up'

    Args:
        osd_tree (dict): The ceph osd tree

    Returns:
        bool: True if all the OSD's in status 'up'. Else False
    """
    osd_nodes = (node for node in osd_tree["nodes"] if node["type"] == "osd")
    for osd_node in osd_nodes:
        if osd_node["status"] != "up":
            logger.warning(f"osd with name {osd_node['name']} is not up")
            return False
    return True
def check_ceph_osd_tree():
    """
    Checks whether an OSD tree is created/modified correctly.
    It is a summary of the previous functions: 'check_osd_tree_1az_vmware',
    'check_osd_tree_3az_aws', 'check_osd_tree_1az_aws'.

    Returns:
        bool: True, if the ceph osd tree is formed correctly. Else False

    """
    osd_pods = pod.get_osd_pods()
    # 'ceph osd tree' should show the new osds under right nodes/hosts
    # Verification is different for 3 AZ and 1 AZ configs
    ct_pod = pod.get_ceph_tools_pod()
    tree_output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
    if config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
        return check_osd_tree_1az_vmware(tree_output, len(osd_pods))
    aws_number_of_zones = 3
    if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        # A 1AZ AWS cluster uses 'rack*' buckets in the osd tree; a 3AZ one
        # does not.
        # Bug fix: the original test was `node_name in "rack0"` — a reversed
        # substring check that also matched names like "0", "r" or "" and
        # missed "rack1"/"rack2". Check for the "rack" prefix instead.
        for node in tree_output["nodes"]:
            if node["name"].startswith("rack"):
                aws_number_of_zones = 1
    if aws_number_of_zones == 1:
        return check_osd_tree_1az_aws(tree_output, len(osd_pods))
    else:
        return check_osd_tree_3az_aws(tree_output, len(osd_pods))
def check_ceph_osd_tree_after_node_replacement():
    """
    Check the ceph osd tree after the process of node replacement.

    Returns:
        bool: True if the ceph osd tree formation is correct,
        and all the OSD's are up. Else False

    """
    ct_pod = pod.get_ceph_tools_pod()
    osd_tree = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
    tree_is_correct = check_ceph_osd_tree()
    if not tree_is_correct:
        logger.warning("Incorrect ceph osd tree formation found")
        return False
    all_osds_up = check_osds_in_hosts_are_up(osd_tree)
    if not all_osds_up:
        logger.warning("Not all the osd's are in status 'up'")
        return False
    return True
def silence_ceph_osd_crash_warning(osd_pod_name):
    """
    Silence the osd crash warning of a specific osd pod

    Args:
        osd_pod_name (str): The name of the osd pod which we need to
            silence the crash warning

    Returns:
        bool: True if it found the osd crash with name 'osd_pod_name'. False otherwise

    """
    ct_pod = pod.get_ceph_tools_pod()
    new_crash_objects_list = ct_pod.exec_ceph_cmd(ceph_cmd="ceph crash ls-new")
    # Only the first crash entry matching the pod name is archived.
    matching_crashes = [
        crash_obj
        for crash_obj in new_crash_objects_list
        if crash_obj.get("utsname_hostname") == osd_pod_name
    ]
    if not matching_crashes:
        logger.info(
            f"Didn't find osd crash with name {osd_pod_name} in ceph crash warnings"
        )
        return False
    logger.info(f"Found osd crash with name {osd_pod_name}")
    obj_crash_id = matching_crashes[0].get("crash_id")
    crash_info = ct_pod.exec_ceph_cmd(ceph_cmd=f"ceph crash info {obj_crash_id}")
    logger.info(f"ceph crash info: {crash_info}")
    logger.info("silence the osd crash warning")
    ct_pod.exec_ceph_cmd(ceph_cmd=f"ceph crash archive {obj_crash_id}")
    return True
def wait_for_silence_ceph_osd_crash_warning(osd_pod_name, timeout=900):
    """
    Wait for 'timeout' seconds to check for the ceph osd crash warning,
    and silence it.

    Args:
        osd_pod_name (str): The name of the osd pod which we need to
            silence the crash warning
        timeout (int): time in seconds to wait for silence the osd crash warning

    Returns:
        bool: True if it found the osd crash with name 'osd_pod_name'. False otherwise

    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=30,
        func=silence_ceph_osd_crash_warning,
        osd_pod_name=osd_pod_name,
    )
    try:
        # Poll every 30s until the crash warning was found and silenced.
        for crash_was_silenced in sampler:
            if crash_was_silenced:
                return True
    except TimeoutError:
        # NOTE(review): confirm TimeoutSampler raises builtin TimeoutError
        # here and not a project-specific timeout exception.
        return False
class CephClusterExternal(CephCluster):
    """
    Handle all external ceph cluster related functionalities.

    Assumption: a CephCluster Kind resource already exists in the namespace
    given by ``config.ENV_DATA["cluster_namespace"]``.
    """

    def __init__(self):
        # NOTE(review): CephCluster.__init__ is intentionally not called —
        # this class builds its own OCP handles; confirm no base-class
        # attributes are relied upon by callers.
        self.POD = ocp.OCP(kind="Pod", namespace=config.ENV_DATA["cluster_namespace"])
        self.CEPHCLUSTER = ocp.OCP(
            kind="CephCluster", namespace=config.ENV_DATA["cluster_namespace"]
        )
        # Wait for the CephCluster CR to be listable, then cache its metadata.
        self.wait_for_cluster_cr()
        self._cluster_name = self.cluster_resource.get("metadata").get("name")
        self._namespace = self.cluster_resource.get("metadata").get("namespace")
        self.cluster = ocs.OCS(**self.cluster_resource)
        # The noobaa CR may become ready later than the cluster CR (retried).
        self.wait_for_nooba_cr()

    @property
    def cluster_name(self):
        """str: Name of the CephCluster resource (read-only)."""
        return self._cluster_name

    @property
    def namespace(self):
        """str: Namespace of the CephCluster resource (read-only)."""
        return self._namespace

    @retry(IndexError, 10, 3, 1)
    def wait_for_cluster_cr(self):
        """
        Wait for the CephCluster CR to appear, retrying on IndexError;
        otherwise 'get("items")[0]' raises list index out of range.
        """
        cluster_cr = self.CEPHCLUSTER.get()
        self.cluster_resource = cluster_cr.get("items")[0]

    @retry((IndexError, AttributeError, TypeError), 100, 3, 1)
    def wait_for_nooba_cr(self):
        """Retry MCG construction until the noobaa CR is usable."""
        self._mcg_obj = MCG()

    def cluster_health_check(self, timeout=300):
        """
        This would be a comprehensive cluster health check
        which includes checking pods, external ceph cluster health.

        Raises:
            exceptions.CephHealthException: if health is not OK within timeout.
        """
        sample = TimeoutSampler(timeout=timeout, sleep=3, func=self.is_health_ok)
        if not sample.wait_for_func_status(result=True):
            raise exceptions.CephHealthException("Cluster health is NOT OK")
        self.wait_for_noobaa_health_ok()
        self.validate_pvc()

    def validate_pvc(self):
        """
        Check whether all PVCs are in bound state
        """
        ocs_pvc_obj = get_all_pvc_objs(namespace=self.namespace)
        for pvc_obj in ocs_pvc_obj:
            # NOTE(review): the assert message is a set literal, and assert
            # is stripped under `python -O` — consider raising instead.
            assert pvc_obj.status == constants.STATUS_BOUND, {
                f"PVC {pvc_obj.name} is not Bound"
            }
            logger.info(f"PVC {pvc_obj.name} is in Bound state")
| 33.56271
| 105
| 0.624675
| 6,633
| 50,042
| 4.5049
| 0.104779
| 0.018741
| 0.012516
| 0.009839
| 0.395536
| 0.312573
| 0.272749
| 0.241759
| 0.213647
| 0.193166
| 0
| 0.009078
| 0.291215
| 50,042
| 1,490
| 106
| 33.585235
| 0.833376
| 0.271952
| 0
| 0.285528
| 0
| 0
| 0.142667
| 0.006108
| 0
| 0
| 0
| 0.003356
| 0.015645
| 1
| 0.092568
| false
| 0.002608
| 0.024772
| 0.006519
| 0.207301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e66d5e1f08dc9a4e5c8cb49651bf2a219e4f50a8
| 3,621
|
py
|
Python
|
scenic/projects/baselines/detr/configs/detr_config.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
scenic/projects/baselines/detr/configs/detr_config.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
scenic/projects/baselines/detr/configs/detr_config.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=line-too-long
r"""Default configs for COCO detection using DETR.
"""
# pylint: enable=line-too-long
import copy
import ml_collections
_COCO_TRAIN_SIZE = 118287
NUM_EPOCHS = 300
def get_config():
  """Builds and returns the ml_collections config for COCO detection with DETR."""
  cfg = ml_collections.ConfigDict()
  cfg.experiment_name = 'coco_detection_detr'

  # --- Dataset ---
  cfg.dataset_name = 'coco_detr_detection'
  cfg.dataset_configs = ml_collections.ConfigDict()
  cfg.dataset_configs.prefetch_to_device = 2
  cfg.dataset_configs.shuffle_buffer_size = 10_000
  cfg.dataset_configs.max_boxes = 99
  cfg.data_dtype_str = 'float32'

  # --- Model ---
  cfg.model_dtype_str = 'float32'
  cfg.model_name = 'detr'
  cfg.matcher = 'hungarian_cover_tpu'
  cfg.hidden_dim = 256
  cfg.num_queries = 100
  cfg.query_emb_size = None  # Same as hidden_size.
  cfg.transformer_num_heads = 8
  cfg.transformer_num_encoder_layers = 6
  cfg.transformer_num_decoder_layers = 6
  cfg.transformer_qkv_dim = 256
  cfg.transformer_mlp_dim = 2048
  cfg.transformer_normalize_before = False
  cfg.backbone_num_filters = 64
  cfg.backbone_num_layers = 50
  cfg.dropout_rate = 0.
  cfg.attention_dropout_rate = 0.1

  # --- Loss ---
  cfg.aux_loss = True
  cfg.bbox_loss_coef = 5.0
  cfg.giou_loss_coef = 2.0
  cfg.class_loss_coef = 1.0
  cfg.eos_coef = 0.1

  # --- Training ---
  cfg.trainer_name = 'detr_trainer'
  cfg.optimizer = 'adam'
  cfg.optimizer_configs = ml_collections.ConfigDict()
  cfg.optimizer_configs.weight_decay = 1e-4
  cfg.optimizer_configs.beta1 = 0.9
  cfg.optimizer_configs.beta2 = 0.999
  cfg.max_grad_norm = 0.1
  cfg.num_training_epochs = NUM_EPOCHS
  cfg.batch_size = 64
  cfg.rng_seed = 0

  # --- Learning rate ---
  # Maps a total-epoch count to the epoch at which the LR decay happens;
  # unknown totals fall back to decaying at 2/3 of training.
  epoch_to_decay_epoch = {500: 400}
  steps_per_epoch = _COCO_TRAIN_SIZE // cfg.batch_size
  cfg.lr_configs = ml_collections.ConfigDict()
  cfg.lr_configs.learning_rate_schedule = 'compound'
  cfg.lr_configs.factors = 'constant*piecewise_constant'
  cfg.lr_configs.decay_events = [
      epoch_to_decay_epoch.get(NUM_EPOCHS, NUM_EPOCHS * 2 // 3) * steps_per_epoch,
  ]
  # Note: this is absolute (not relative):
  cfg.lr_configs.decay_factors = [.1]
  cfg.lr_configs.base_learning_rate = 1e-4

  # --- Backbone training: same optimizer/schedule, smaller base LR ---
  cfg.backbone_training = ml_collections.ConfigDict()
  cfg.backbone_training.optimizer = copy.deepcopy(cfg.optimizer)
  cfg.backbone_training.optimizer_configs = copy.deepcopy(cfg.optimizer_configs)
  cfg.backbone_training.lr_configs = copy.deepcopy(cfg.lr_configs)
  cfg.backbone_training.lr_configs.base_learning_rate = 1e-5

  # --- Pretrained backbone ---
  cfg.load_pretrained_backbone = True
  cfg.freeze_backbone_batch_stats = True
  cfg.pretrained_backbone_configs = ml_collections.ConfigDict()
  # Download pretrained ResNet50 checkpoints from here:
  # https://github.com/google-research/scenic/tree/main/scenic/projects/baselines pylint: disable=line-too-long
  cfg.pretrained_backbone_configs.checkpoint_path = 'path_to_checkpoint_of_resnet_50'

  # --- Logging / checkpointing ---
  cfg.write_summary = True
  cfg.xprof = True  # Profile using xprof.
  cfg.log_summary_steps = 50  # Train summary frequency (steps).
  cfg.log_large_summary_steps = 1000  # Expensive summary operations freq.
  cfg.checkpoint = True  # Do checkpointing.
  cfg.checkpoint_steps = steps_per_epoch
  cfg.debug_train = False  # Debug mode during training.
  cfg.debug_eval = False  # Debug mode during eval.
  return cfg
| 34.160377
| 111
| 0.775753
| 491
| 3,621
| 5.421589
| 0.358452
| 0.030428
| 0.039444
| 0.05447
| 0.12284
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0.028865
| 0.138912
| 3,621
| 105
| 112
| 34.485714
| 0.824888
| 0.182546
| 0
| 0
| 0
| 0
| 0.053657
| 0.019822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0.027027
| 0
| 0.054054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e66dd9b0c4524178c41ae4349d387915dbfbc5a0
| 2,105
|
py
|
Python
|
prepare_cicero_peaks.py
|
lab-medvedeva/SCABFA-feature-selection
|
d5cd7568e667a75f75e753d9ab9dc645f3166902
|
[
"MIT"
] | null | null | null |
prepare_cicero_peaks.py
|
lab-medvedeva/SCABFA-feature-selection
|
d5cd7568e667a75f75e753d9ab9dc645f3166902
|
[
"MIT"
] | null | null | null |
prepare_cicero_peaks.py
|
lab-medvedeva/SCABFA-feature-selection
|
d5cd7568e667a75f75e753d9ab9dc645f3166902
|
[
"MIT"
] | null | null | null |
from scale.dataset import read_mtx
from argparse import ArgumentParser
import pandas as pd
import numpy as np
import os
def parse_args():
    """Build the CLI parser for the cicero peak-preparation script and parse argv."""
    parser = ArgumentParser('Preparing raw peaks from cicero pipeline')
    # (flag, value type, help text) for every supported option.
    options = [
        ('--dataset_path', str, 'Path to Scale dataset: count, feature, barcode folder'),
        ('--label_path', str, 'Path to cell labels'),
        ('--num_peaks_threshold', int, 'Num peaks to filter'),
        ('--output_path', str, 'Path to save peaks in bed folder'),
        ('--suffix', str, 'Suffix to path'),
    ]
    for flag, value_type, description in options:
        parser.add_argument(flag, type=value_type, help=description)
    return parser.parse_args()
def main():
    """Entry point: keep the top peaks per cell type and write them as BED files.

    Reads a labels TSV and a Scale-format matrix (via the project helper
    read_mtx), thresholds per-cell-type aggregate counts, and writes one
    BED file per cell type into the output folder.
    """
    args = parse_args()
    labels = pd.read_csv(args.label_path, sep='\t', header=None)
    count, feature, barcode = read_mtx(args.dataset_path)
    os.makedirs(args.output_path, exist_ok=True)

    # Group the label-frame row indices by cell type (column 1 of the TSV).
    cell_barcodes = {
        cell_type: list(labels[labels[1] == cell_type].index)
        for cell_type in labels[1].unique()
    }

    for cell_type, row_indices in cell_barcodes.items():
        # Aggregate counts over all cells of this type: one value per peak.
        cell_by_feature = np.asarray(count[row_indices].sum(axis=0)).flatten()
        # Value of the num_peaks_threshold-th largest peak.
        feature_threshold = cell_by_feature[np.argsort(cell_by_feature)[-args.num_peaks_threshold]]
        print(f'{cell_type}: {feature_threshold}')
        filtered_features = (cell_by_feature > 0) & (cell_by_feature >= feature_threshold)
        print(f'{cell_type}: filtered {np.sum(filtered_features)}')
        output = pd.DataFrame(feature[filtered_features])
        # Peak names look like 'chr_start_end'; split them into BED columns.
        for part_index, column_name in enumerate(('chr', 'start', 'end')):
            output[column_name] = output[0].apply(lambda x, i=part_index: x.split('_')[i])
        output.drop(0, axis=1).to_csv(
            os.path.join(args.output_path, f'{cell_type.replace(" ", "_").replace("/", "_")}_{args.suffix}.bed'),
            header=None,
            index=None,
            sep='\t'
        )
# Script entry point.
if __name__ == '__main__':
    main()
| 40.480769
| 113
| 0.669359
| 291
| 2,105
| 4.591065
| 0.309278
| 0.047904
| 0.068114
| 0.031437
| 0.167665
| 0.13997
| 0.13997
| 0.083832
| 0.083832
| 0.083832
| 0
| 0.008125
| 0.181473
| 2,105
| 51
| 114
| 41.27451
| 0.767266
| 0.045131
| 0
| 0
| 0
| 0.025
| 0.207773
| 0.035376
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.125
| 0
| 0.2
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e670e0b486388fd350ec3090250f4bbe49211d07
| 6,225
|
py
|
Python
|
wasch/tests.py
|
waschag-tvk/pywaschedv
|
8f0428827c4c1c7e9462eaa94ba02290db1c340f
|
[
"MIT"
] | 1
|
2020-01-17T16:35:10.000Z
|
2020-01-17T16:35:10.000Z
|
wasch/tests.py
|
waschag-tvk/pywaschedv
|
8f0428827c4c1c7e9462eaa94ba02290db1c340f
|
[
"MIT"
] | 6
|
2018-06-01T15:02:11.000Z
|
2018-09-04T15:33:05.000Z
|
wasch/tests.py
|
waschag-tvk/pywaschedv
|
8f0428827c4c1c7e9462eaa94ba02290db1c340f
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import (
User,
)
from wasch.models import (
Appointment,
WashUser,
WashParameters,
# not models:
AppointmentError,
StatusRights,
)
from wasch import tvkutils, payment
class WashUserTestCase(TestCase):
    """Tests for the WashUser manager's special 'god' account."""

    def test_god(self):
        """'god' is activated, staff, superuser and member of all status groups."""
        god, _ = WashUser.objects.get_or_create_god()
        self.assertTrue(god.isActivated)
        self.assertTrue(god.user.is_staff)
        self.assertTrue(god.user.is_superuser)
        # Bug fix: this was a generator expression; each assertIn consumed it
        # up to the first match, so later expected groups were checked against
        # the partially-exhausted iterator and could fail depending on order.
        # Materialize the group names once instead.
        group_names = [group.name for group in god.user.groups.all()]
        for expected_group in StatusRights(9).groups:
            self.assertIn(expected_group, group_names)
class AppointmentTestCase(TestCase):
    """Tests for Appointment booking, cancellation, rebooking and usage rules."""

    exampleUserName = 'waschexample'
    examplePoorUserName = 'poor'
    exampleTime = Appointment.manager.scheduled_appointment_times()[-1]
    exampleTooOldTime = timezone.make_aware(datetime.datetime(1991, 12, 25))
    exampleTooOldReference = 4481037
    exampleMachine, exampleBrokenMachine, lastMachine = \
        tvkutils.get_or_create_machines()[0]

    def setUp(self):
        """Create machines plus one activated and one non-activated end user."""
        tvkutils.setup()
        self.exampleMachine.isAvailable = True  # though this is default
        self.exampleMachine.save()
        self.exampleBrokenMachine.isAvailable = False
        # Bug fix: this previously called self.exampleMachine.save() a second
        # time, so the broken machine's isAvailable=False was never persisted.
        self.exampleBrokenMachine.save()
        WashUser.objects.create_enduser(self.exampleUserName, isActivated=True)
        WashUser.objects.create_enduser(
            self.examplePoorUserName, isActivated=False)

    def _createExample(self):
        """Book the example appointment for the activated example user."""
        user = User.objects.get(username=self.exampleUserName)
        return Appointment.objects.create(
            time=self.exampleTime, machine=self.exampleMachine, user=user,
            wasUsed=False)

    def test_create(self):
        """A created appointment exists, blocks re-booking, and frees on cancel."""
        result = self._createExample()
        self.assertEqual(result.time, self.exampleTime)
        self.assertEqual(result.machine, self.exampleMachine)
        self.assertEqual(result.user.username, self.exampleUserName)
        self.assertTrue(Appointment.manager.appointment_exists(
            result.time, result.machine))
        self.assertFalse(Appointment.manager.bookable(
            result.time, result.machine, result.user))
        self.assertEqual(
            Appointment.manager.why_not_bookable(
                result.time, result.machine, result.user),
            41,  # Appointment taken
        )
        result.cancel()
        self.assertTrue(Appointment.manager.bookable(
            result.time, result.machine, result.user))

    def test_bookable(self):
        """Bookability rules: inactive user, old time, broken machine, references."""
        user = User.objects.get(username=self.exampleUserName)
        poorUser = User.objects.get(username=self.examplePoorUserName)
        god, _ = WashUser.objects.get_or_create_god()
        self.assertEqual(
            Appointment.manager.why_not_bookable(
                self.exampleTime, self.exampleMachine, poorUser),
            31,  # User not active
        )
        self.assertTrue(Appointment.manager.bookable(
            self.exampleTime, self.exampleMachine, user))
        self.assertTrue(Appointment.manager.bookable(
            self.exampleTime, self.exampleMachine, god.user))
        self.assertEqual(
            Appointment.manager.why_not_bookable(
                self.exampleTooOldTime, self.exampleMachine, user),
            11,  # Unsupported time
        )
        # Round-trip through the integer reference encoding.
        unsavedTooOldAppointment = Appointment.from_reference(
            self.exampleTooOldReference, user)
        self.assertEqual(self.exampleTooOldReference, Appointment(
            time=self.exampleTooOldTime, machine=self.exampleMachine,
            user=user).reference)
        self.assertEqual(unsavedTooOldAppointment.time, self.exampleTooOldTime)
        self.assertEqual(unsavedTooOldAppointment.machine, self.exampleMachine)
        self.assertEqual(
            unsavedTooOldAppointment.user.username, self.exampleUserName)
        self.assertEqual(
            unsavedTooOldAppointment.reference, self.exampleTooOldReference)
        self.assertEqual(
            Appointment.manager.why_not_bookable(
                self.exampleTime, self.exampleBrokenMachine, user),
            21,  # Machine out of service
        )

    def test_make_appointment(self):
        """make_appointment books, blocks duplicates, and respects payment."""
        user = User.objects.get(username=self.exampleUserName)
        god, _ = WashUser.objects.get_or_create_god()
        appointment = Appointment.manager.make_appointment(
            self.exampleTime, self.exampleMachine, user)
        reference = appointment.reference
        self.assertEqual(
            Appointment.manager.why_not_bookable(
                self.exampleTime, self.exampleMachine, god.user),
            41,  # Appointment taken
        )
        with self.assertRaises(AppointmentError) as ae:
            Appointment.manager.make_appointment(
                self.exampleTime, self.exampleMachine, user)
        self.assertEqual(ae.exception.reason, 41)
        appointment.cancel()
        self.assertEqual(
            appointment,
            Appointment.manager.filter_for_reference(reference).get())
        # With an empty bonus method the slot is bookable but payment fails.
        WashParameters.objects.update_value('bonus-method', 'empty')
        self.assertTrue(Appointment.manager.bookable(
            self.exampleTime, self.exampleMachine, user))
        with self.assertRaises(payment.PaymentError):
            Appointment.manager.make_appointment(
                self.exampleTime, self.exampleMachine, user)

    def test_use(self):
        """A used appointment cannot be used again, rebooked, or cancelled."""
        user = User.objects.get(username=self.exampleUserName)
        appointment = Appointment.manager.make_appointment(
            self.exampleTime, self.exampleMachine, user)
        appointment.use()
        with self.assertRaises(AppointmentError) as ae:
            appointment.use()
        self.assertEqual(ae.exception.reason, 61)  # Appointment already used
        with self.assertRaises(AppointmentError) as ae:
            appointment.rebook()
        self.assertEqual(ae.exception.reason, 41)  # Appointment taken
        with self.assertRaises(AppointmentError) as ae:
            appointment.cancel()
        self.assertEqual(ae.exception.reason, 61)  # Appointment already used
        self.assertTrue(appointment.wasUsed)
| 42.060811
| 79
| 0.676948
| 591
| 6,225
| 7.040609
| 0.204738
| 0.064888
| 0.050228
| 0.071377
| 0.504206
| 0.416246
| 0.416246
| 0.355203
| 0.267243
| 0.163422
| 0
| 0.007566
| 0.235663
| 6,225
| 147
| 80
| 42.346939
| 0.866961
| 0.031165
| 0
| 0.382353
| 0
| 0
| 0.005482
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.051471
| false
| 0
| 0.044118
| 0
| 0.154412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e672d8fb22849a3e49b4cf1505ef89fb8d62430d
| 2,018
|
py
|
Python
|
day17/module.py
|
arcadecoffee/advent-2021
|
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
|
[
"MIT"
] | null | null | null |
day17/module.py
|
arcadecoffee/advent-2021
|
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
|
[
"MIT"
] | null | null | null |
day17/module.py
|
arcadecoffee/advent-2021
|
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
|
[
"MIT"
] | null | null | null |
"""
Advent of Code 2021 - Day 17
https://adventofcode.com/2021/day/17
"""
import re
from math import ceil, sqrt
from typing import List, Tuple
DAY = 17
FULL_INPUT_FILE = f'../inputs/day{DAY:02d}/input.full.txt'
TEST_INPUT_FILE = f'../inputs/day{DAY:02d}/input.test.txt'
def load_data(infile_path: str) -> Tuple[int, int, int, int]:
    """Parse the puzzle input's target-area line into (x1, x2, y1, y2)."""
    regex = r'target area: x=(-?\d*)\.\.(-?\d*), y=(-?\d*)\.\.(-?\d*)'
    with open(infile_path, 'r', encoding='ascii') as infile:
        matched = re.match(regex, infile.readline())
        x1, x2, y1, y2 = (int(group) for group in matched.groups())
    return x1, x2, y1, y2
def maximum_altitude(y: int) -> int:
    """Highest altitude reachable given the target's lower y bound (y < 0)."""
    # Launching upward at speed (-y - 1) still clips the bottom row of the
    # target; the peak is the triangular number of that speed.
    speed = y * -1
    return int(speed * (speed - 1) / 2)
def shot_good(x_velocity: int, y_velocity: int, x1: int, x2: int, y1: int, y2: int) -> bool:
    """Simulate a launch; True if the probe ever lands inside the target box."""
    x_pos, y_pos = 0, 0
    # Stop once the probe has overshot to the right or fallen below the box.
    while x_pos <= x2 and y_pos >= y1:
        if x1 <= x_pos and y_pos <= y2:
            return True
        x_pos += x_velocity
        y_pos += y_velocity
        # Drag pulls the x velocity toward zero; gravity decrements y.
        if x_velocity > 0:
            x_velocity -= 1
        elif x_velocity < 0:
            x_velocity += 1
        y_velocity -= 1
    return False
def count_good_shots(x1: int, x2: int, y1: int, y2: int) -> int:
    """Count every integer launch velocity that lands the probe in the target."""
    # Smallest x velocity whose triangular-number reach covers x1.
    x_min = ceil(sqrt(x1 * 8 + 1) / 2 - 1 / 2)
    x_max = round(x2 / 2) + 1
    # Multi-step ("arcing") trajectories, simulated exhaustively.
    arcing_count = sum(
        1
        for vx in range(x_min, x_max)
        for vy in range(y1, y1 * -1)
        if shot_good(vx, vy, x1, x2, y1, y2)
    )
    # One-step "direct" shots: any velocity equal to a point inside the box.
    direct_shot_count = (x2 + 1 - x1) * (y2 + 1 - y1)
    return arcing_count + direct_shot_count
def part_1(infile_path: str) -> int:
    """Part 1 answer: the highest altitude reachable while hitting the target."""
    _, _, y_lower, _ = load_data(infile_path)
    return maximum_altitude(y_lower)
def part_2(infile_path: str) -> int:
    """Part 2 answer: the number of distinct good launch velocities."""
    bounds = load_data(infile_path)
    return count_good_shots(*bounds)
# Script entry point: solve both parts against the full puzzle input file.
if __name__ == '__main__':
    part1_answer = part_1(FULL_INPUT_FILE)
    print(f'Part 1: {part1_answer}')
    part2_answer = part_2(FULL_INPUT_FILE)
    print(f'Part 2: {part2_answer}')
| 29.246377
| 92
| 0.617939
| 335
| 2,018
| 3.486567
| 0.268657
| 0.05137
| 0.03339
| 0.046233
| 0.239726
| 0.239726
| 0.171233
| 0.171233
| 0.085616
| 0.085616
| 0
| 0.05304
| 0.233895
| 2,018
| 68
| 93
| 29.676471
| 0.702458
| 0.03221
| 0
| 0.043478
| 0
| 0
| 0.096144
| 0.048843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.065217
| 0.021739
| 0.347826
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e67406a638efa86479227542aee6a924595e4826
| 4,235
|
py
|
Python
|
src/main/python/depysible/domain/rete.py
|
stefano-bragaglia/DePYsible
|
6b53ede459a10f5e24da89d3ebaa05f08ec7af12
|
[
"BSD-2-Clause"
] | 4
|
2018-09-24T23:51:05.000Z
|
2021-01-06T09:13:52.000Z
|
src/main/python/depysible/domain/rete.py
|
stefano-bragaglia/DefeasiblePython
|
6b53ede459a10f5e24da89d3ebaa05f08ec7af12
|
[
"BSD-2-Clause"
] | 1
|
2020-05-26T01:14:44.000Z
|
2020-05-27T07:54:15.000Z
|
src/main/python/depysible/domain/rete.py
|
stefano-bragaglia/DePYsible
|
6b53ede459a10f5e24da89d3ebaa05f08ec7af12
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
Payload = Tuple[List['Literal'], 'Substitutions']
class Root:
    """Entry node of the rete network; fans ground literals out to its children."""

    def __init__(self):
        # Child nodes register themselves here (see Alfa.__init__).
        self.children = set()

    def notify(self, ground: 'Literal'):
        """Forward a ground literal to every child with empty substitutions."""
        for node in self.children:
            node.notify(ground, {}, self)
class Alfa:
    """Pattern node: remembers ground literals that unify with its pattern."""

    def __init__(self, pattern: 'Literal', parent: Root):
        self.parent = parent
        self.pattern = pattern
        self.name = repr(pattern)
        self.memory = []
        self.children = set()
        # Subscribe to the parent so its notify() reaches this node.
        parent.children.add(self)

    def notify(self, ground: 'Literal', subs: 'Substitutions', parent: Root):
        """Unify the incoming literal with the pattern and propagate new matches."""
        # Incoming subs are discarded: unification restarts from this pattern.
        bindings = self.pattern.unifies(ground)
        if bindings is None:
            return
        payload = ([ground], bindings)
        if payload not in self.memory:
            self.memory.append(payload)
            for node in self.children:
                node.notify([ground], bindings, self)
class Beta:
    """
    Join node of the rete network: combines partial matches coming from two
    parent nodes whose variable substitutions are mutually consistent.
    """

    def __init__(self, parent_1: Union[Alfa, 'Beta'], parent_2: Alfa):
        self.parent_1 = parent_1
        self.parent_2 = parent_2
        # Human-readable name: comma-joined names of both parents.
        self.name = '%s, %s' % (parent_1.name, parent_2.name)
        self.memory = []
        self.children = set()
        parent_1.children.add(self)
        parent_2.children.add(self)

    def notify(self, ground: List['Literal'], subs: 'Substitutions', parent: Union[Alfa, 'Beta']):
        """Join the incoming match against the memory of the *other* parent."""
        if parent is self.parent_1:
            for ground_2, subs_2 in self.parent_2.memory:
                self._notify(ground, subs, ground_2, subs_2)
        elif parent is self.parent_2:
            for ground_1, subs_1 in self.parent_1.memory:
                self._notify(ground_1, subs_1, ground, subs)

    @staticmethod
    def _unifies(subs_1: 'Substitutions', subs_2: 'Substitutions') -> Optional['Substitutions']:
        """Merge two substitution dicts, or None if they disagree on any variable."""
        for var in set(subs_1).intersection(subs_2):
            if subs_1[var] != subs_2[var]:
                return None
        return {**subs_1, **subs_2}

    def _notify(self, ground_1: List['Literal'], subs_1: 'Substitutions', ground_2: List['Literal'],
                subs_2: 'Substitutions'):
        # Propagate the combined match only when it is new to this node.
        subs = self._unifies(subs_1, subs_2)
        if subs is not None:
            ground = [*ground_1, *ground_2]
            payload = (ground, subs)
            if payload not in self.memory:
                self.memory.append(payload)
                for child in self.children:
                    child.notify(ground, subs, self)
class Leaf:
    """
    Terminal node for one rule: when the rule's whole body has matched, it
    grounds the head, appends the grounded rule to the shared agenda and
    feeds the derived literal back into the network root.
    """

    def __init__(self, rule: 'Rule', parent: Union[Alfa, Beta], root: Root, agenda: List):
        self.parent = parent
        self.rule = rule
        self.name = repr(rule)
        self.memory = []
        self.root = root
        # Shared output list; fire_rules passes its result list here.
        self.agenda = agenda
        parent.children.add(self)

    def notify(self, ground: List['Literal'], subs: 'Substitutions', parent: Union[Alfa, 'Beta']):
        """Ground the rule head with *subs* and fire it if not seen before."""
        # Imported here (not at module top) — presumably to avoid a circular
        # import with depysible.domain.definitions; confirm before moving.
        from depysible.domain.definitions import Rule
        payload = (ground, subs)
        if payload not in self.memory:
            self.memory.append(payload)
            lit = self.rule.head.substitutes(subs)
            # if self.rule.type is RuleType.STRICT:
            #     fact = Rule(lit, self.rule.type, [])
            #     if fact not in self.agenda:
            #         self.agenda.append(fact)
            rule = Rule(lit, self.rule.type, ground)
            if rule not in self.agenda:
                self.agenda.append(rule)
            # Re-inject the derived literal so other rules can match it.
            self.root.notify(lit)
def fire_rules(program: 'Program') -> List['Rule']:
    """
    Run forward chaining over *program* with a rete network.

    Facts are copied to the result unchanged; every non-fact rule is compiled
    into a chain of Alfa/Beta nodes ending in a Leaf that appends derived
    rules to the shared result list. Finally each fact's head is pushed
    through the network to trigger derivations.

    Args:
        program: the logic program to evaluate.

    Returns:
        The program itself when it is already ground (note: not a list),
        otherwise the list of facts plus all derived rules.
    """
    if program.is_ground():
        return program
    rules = []
    table = {}
    root = Root()
    for rule in program.rules:
        if rule.is_fact():
            rules.append(rule)
        else:
            # Build (or reuse, via `table`) the alfa/beta chain for the body.
            # NOTE(review): setdefault evaluates Alfa(...)/Beta(...) eagerly,
            # so a node is constructed — and registers itself with its
            # parent's children — even when an existing entry is reused;
            # verify this side effect is intended.
            beta = None
            for lit in rule.body:
                name = repr(lit)
                alfa = table.setdefault(name, Alfa(lit, root))
                if beta is None:
                    beta = alfa
                else:
                    name = '%s, %s' % (beta.name, alfa.name)
                    beta = table.setdefault(name, Beta(beta, alfa))
            Leaf(rule, beta, root, rules)
    for fact in program.get_facts():
        root.notify(fact.head)
    return rules
| 32.083333
| 100
| 0.563872
| 515
| 4,235
| 4.514563
| 0.130097
| 0.025806
| 0.036129
| 0.04086
| 0.327742
| 0.284301
| 0.260215
| 0.233548
| 0.196989
| 0.196989
| 0
| 0.013236
| 0.322078
| 4,235
| 131
| 101
| 32.328244
| 0.796587
| 0.033766
| 0
| 0.267327
| 0
| 0
| 0.051872
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108911
| false
| 0
| 0.049505
| 0
| 0.237624
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6751ce031099f22bcc8f169d0324a7aff0147ed
| 15,501
|
py
|
Python
|
pythonbot_1.0/GameData.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | 1
|
2017-01-18T21:25:21.000Z
|
2017-01-18T21:25:21.000Z
|
pythonbot_1.0/GameData.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | null | null | null |
pythonbot_1.0/GameData.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | 3
|
2017-02-06T04:35:02.000Z
|
2020-03-08T18:56:25.000Z
|
import HandRankings as Hand
from deuces.deuces import Card, Evaluator
class GameData:
    def __init__(self, name, opponent_name, stack_size, bb):
        """Initialize per-match, per-player and per-hand tracking state.

        Args:
            name: our bot's name.
            opponent_name: the opponent bot's name.
            stack_size: starting stack size, int or numeric string.
            bb: big blind amount, int or numeric string.
        """
        # match stats
        self.name = name
        self.opponent_name = opponent_name
        self.starting_stack_size = int(stack_size)
        self.num_hands = 0
        self.num_wins = 0
        self.num_flop = 0  # hands that reached the flop (see get_action)
        self.big_blind = int(bb)
        # self pre-flop stats
        self.pfr = 0
        self.vpip = 0
        self.three_bet = 0
        self.fold_big_bet = 0
        # opponent pre-flop stats
        self.opponent_pfr = 0
        self.opponent_vpip = 0
        self.opponent_three_bet = 0
        self.opponent_fold_pfr = 0
        self.opponent_fold_three_bet = 0
        # self post-flop stats
        self.aggression_factor = False
        self.showdown = 0
        self.c_bet = 0
        self.showdown_win = 0
        self.double_barrel = 0
        self.discarded_card = None
        # opponent post-flop stats
        self.opponent_c_bet = 0
        self.opponent_fold_c_bet = 0
        self.opponent_double_barrel = 0
        # current hand stats
        self.button = True  # True when we are on the button (set per hand)
        self.current_pot_size = 0
        self.current_hand = []  # our hole cards as card strings
        self.current_hand_strength = 0.0  # win odds from HandRankings
        self.hand_class = ''  # evaluator class string, e.g. flush
        self.hand_score = 0  # deuces evaluator score of hand + board
        self.current_game_state = ''  # PREFLOP / FLOPTURN / TURNRIVER / POSTRIVER
        self.board_cards = []
        self.last_actions = []
        self.current_legal_actions = []
        self.has_called = False
        self.opponent_has_called = False
        self.has_two_bet = False
        self.opponent_has_two_bet = False
        self.has_three_bet = False
        self.opponent_has_three_bet = False
        self.has_four_bet = False
        self.opponent_has_four_bet = False
        # board-card count ('0','3','4','5') -> packets seen at that street
        self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
        self.discard = False
        self.has_five_bet = False
        self.has_bet_aggressively = False
        self.time_bank = 0.0  # seconds left; refreshed each get_action
        self.opc = 0  # starting stack minus current pot (see get_action)
def new_hand(self, data_list):
self.num_hands += 1
self.button = data_list[2]
if "true" in self.button:
self.button = True
else:
self.button = False
self.current_hand = [data_list[3], data_list[4]]
self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
self.current_game_state = 'PREFLOP'
self.board_cards = []
self.last_actions = []
self.current_legal_actions = []
self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
self.has_two_bet = False
self.opponent_has_two_bet = False
self.has_three_bet = False
self.opponent_has_three_bet = False
self.has_four_bet = False
self.opponent_has_four_bet = False
self.has_bet_aggressively = False
self.aggression_factor = False
self.discarded_card = None
def get_action(self, data_list):
    """Parse a GETACTION message and update hand-state plus HUD-style stats.

    ``data_list`` is the tokenized message: pot size at [1], number of
    board cards at [2], the board cards, a count of last actions plus the
    actions, then a count of legal actions plus the actions, and finally
    the time bank.  Mutates many tracking attributes; returns nothing.
    """
    self.current_pot_size = int(data_list[1])
    # opc: chips remaining behind — presumably "our chip count"; TODO confirm.
    self.opc = self.starting_stack_size - self.current_pot_size
    self.time_bank = float(data_list[-1])
    num_board_cards = int(data_list[2])
    # street_dict counts how many GETACTION packets were seen with each
    # board size (0 = preflop, 3 = flop, 4 = turn, 5 = river).
    self.street_dict[str(num_board_cards)] += 1
    # Advance the street state machine and reset per-street betting flags.
    if self.current_game_state == 'PREFLOP':
        if self.street_dict['3'] > 0 and self.street_dict['4'] == 0:
            self.has_two_bet = False
            self.opponent_has_two_bet = False
            self.has_three_bet = False
            self.opponent_has_three_bet = False
            self.has_four_bet = False
            self.opponent_has_four_bet = False
            self.has_bet_aggressively = False
            self.current_game_state = 'FLOPTURN'
            self.num_flop += 1
    elif self.current_game_state == 'FLOPTURN':
        if self.street_dict['4'] > 0 and self.street_dict['5'] == 0:
            self.has_two_bet = False
            self.opponent_has_two_bet = False
            self.has_three_bet = False
            self.opponent_has_three_bet = False
            self.has_four_bet = False
            self.opponent_has_four_bet = False
            self.has_bet_aggressively = False
            self.current_game_state = 'TURNRIVER'
    elif self.current_game_state == 'TURNRIVER':
        if self.street_dict['5'] > 0:
            self.has_two_bet = False
            self.opponent_has_two_bet = False
            self.has_three_bet = False
            self.opponent_has_three_bet = False
            self.has_four_bet = False
            self.opponent_has_four_bet = False
            self.has_bet_aggressively = False
            self.current_game_state = 'POSTRIVER'
    # Accumulate newly revealed board cards (skip duplicates from resends).
    for i in range(num_board_cards):
        board_card = data_list[3 + i]
        if board_card not in self.board_cards:
            self.board_cards.append(data_list[3 + i])
    # Re-evaluate the made-hand score/class whenever a board exists.
    if num_board_cards > 0:
        board_cards = []
        for board_card in self.board_cards:
            board_cards.append(Card.new(board_card))
        hand = []
        for card in self.current_hand:
            hand.append(Card.new(card))
        self.hand_score = Evaluator().evaluate(hand, board_cards)
        self.hand_class = Evaluator().class_to_string(Evaluator().get_rank_class(self.hand_score))
    # Last-actions section: count at data_list[index], actions follow.
    index = 3 + num_board_cards
    num_last_actions = int(data_list[index])
    index += 1
    current_last_actions = []
    for i in range(num_last_actions):
        current_last_actions.append(data_list[index + i])
    self.last_actions.append(current_last_actions)
    # If we discarded last street, pick the replacement card out of the
    # DISCARD action string (fixed "DISCARD:xx:yy"-style offsets — TODO
    # confirm the exact action format against the engine protocol).
    if self.discard:
        for action in current_last_actions:
            if 'DISCARD' in action and self.name in action:
                old_card = action[8:10]
                new_card = action[11:13]
                self.current_hand[self.current_hand.index(old_card)] = new_card
                self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
                self.discard = False
                break
    if self.current_game_state == 'PREFLOP':
        # Pot of exactly 4 (two big blinds) means a limped pot: the button
        # (small blind) has just called.
        if self.current_pot_size == 4:
            if self.button:
                self.vpip += 1
                self.has_called = True
            else:
                self.opponent_vpip += 1
                self.opponent_has_called = True
        else:
            # Raised pot: attribute PFR/VPIP and 2/3/4-bet flags by who
            # raised on which preflop packet (street_dict['0'] round).
            for action in current_last_actions:
                if 'RAISE' in action:
                    round_num = self.street_dict['0']
                    if round_num == 1:
                        self.opponent_pfr += 1
                        self.opponent_vpip += 1
                        self.opponent_has_two_bet = True
                    elif round_num == 2:
                        if self.button:
                            if self.name in action:
                                self.pfr += 1
                                self.vpip += 1
                                self.has_two_bet = True
                            else:
                                self.opponent_pfr += 1
                                self.opponent_vpip += 1
                                self.opponent_has_three_bet = True
                        else:
                            if self.name in action:
                                self.pfr += 1
                                self.vpip += 1
                                self.has_three_bet = True
                            else:
                                self.opponent_pfr += 1
                                self.opponent_vpip += 1
                                self.opponent_has_four_bet = True
                    elif round_num == 3:
                        if self.name in action:
                            self.pfr += 1
                            self.vpip += 1
                elif 'CALL' in action:
                    if self.name in action:
                        self.vpip += 1
                    else:
                        self.opponent_vpip += 1
    elif self.current_game_state == 'FLOPTURN':
        round_num = self.street_dict['3']
        if round_num == 1:
            # First flop packet: a discard decision becomes available.
            self.discard = True
        elif round_num == 2:
            for action in current_last_actions:
                if 'BET' in action:
                    self.opponent_c_bet += 1
                    break
        elif round_num == 3:
            for action in current_last_actions:
                if 'BET' in action:
                    if self.name in action:
                        self.c_bet += 1
                    else:
                        self.opponent_c_bet += 1
                elif 'RAISE' in action:
                    if self.name in action:
                        self.has_two_bet = True
                    else:
                        if self.button:
                            self.opponent_has_three_bet = True
                        else:
                            self.opponent_has_two_bet = True
        elif round_num == 4:
            for action in current_last_actions:
                if 'RAISE' in action:
                    if self.name in action:
                        if self.button:
                            self.has_four_bet = True
                        else:
                            self.has_three_bet = True
                    break
    elif self.current_game_state == 'TURNRIVER':
        round_num = self.street_dict['4']
        if round_num == 1:
            # First turn packet: another discard decision is available.
            self.discard = True
            for action in current_last_actions:
                if 'BET' in action:
                    if self.name in action:
                        self.c_bet += 1
                    else:
                        self.opponent_c_bet += 1
                    break
        elif round_num == 2:
            for action in current_last_actions:
                if 'BET' in action:
                    self.opponent_c_bet += 1
                    break
        elif round_num == 3:
            for action in current_last_actions:
                if 'BET' in action:
                    if self.name in action:
                        self.c_bet += 1
                    else:
                        self.opponent_c_bet += 1
                elif 'RAISE' in action:
                    if self.name in action:
                        self.has_two_bet = True
                    else:
                        if self.button:
                            self.opponent_has_three_bet = True
                        else:
                            self.opponent_has_two_bet = True
        elif round_num == 4:
            for action in current_last_actions:
                if 'RAISE' in action:
                    if self.name in action:
                        if self.button:
                            self.has_four_bet = True
                        else:
                            self.has_three_bet = True
                    break
    elif self.current_game_state == 'POSTRIVER':
        round_num = self.street_dict['5']
        if round_num == 1:
            for action in current_last_actions:
                if 'BET' in action:
                    if self.name in action:
                        self.double_barrel += 1
                    else:
                        self.opponent_double_barrel += 1
                    break
    # Legal-actions section: count, then the action strings.
    index += num_last_actions
    num_legal_actions = int(data_list[index])
    index += 1
    self.current_legal_actions = []
    for i in range(num_legal_actions):
        self.current_legal_actions.append(data_list[index + i])
def legal_action(self, action):
    """Look up *action* among the currently legal actions.

    Returns ``[min, max]`` bounds for BET/RAISE, the amount to call for
    CALL (or ``True`` if no opponent raise is pending), ``True`` for any
    other legal action, and ``None`` when the action is not legal.
    """
    for candidate in self.current_legal_actions:
        if action not in candidate:
            continue
        if action in ('BET', 'RAISE'):
            # Candidate is formatted like "RAISE:min:max".
            _, lo, hi = candidate.split(':', 2)
            return [int(lo), int(hi)]
        if action == 'CALL':
            # Amount to call is the size of the opponent's pending raise.
            for last in self.last_actions[-1]:
                if 'RAISE' in last and self.opponent_name in last:
                    start = last.index(':') + 1
                    stop = last.index(':', start)
                    return int(last[start:stop])
        return True
    return None
def hand_over(self, data_list):
    """Update per-hand result statistics from a HANDOVER message.

    Layout of ``data_list``: number of board cards at [3], the board
    cards, then the number of last actions followed by the actions.
    Credits wins and fold-to-aggression counters depending on the street
    the hand ended on.
    """
    # Bug fix: these tokens arrive as strings but were used directly in
    # index arithmetic and range() (TypeError); cast them as get_action does.
    num_board_cards = int(data_list[3])
    index = 4 + num_board_cards
    num_last_actions = int(data_list[index])
    # Bug fix: step past the count token before reading the actions,
    # mirroring the parsing in get_action (the count itself was being
    # collected as the first "action").
    index += 1
    current_last_actions = []
    for i in range(num_last_actions):
        current_last_actions.append(data_list[index + i])
    if self.current_game_state == 'PREFLOP':
        for action in current_last_actions:
            if 'FOLD' in action and self.opponent_name in action:
                if self.button:
                    # In position our raise is in the previous packet.
                    for last_action in self.last_actions[-1]:
                        if 'RAISE' in last_action and self.name in last_action:
                            self.opponent_fold_pfr += 1
                            if self.has_three_bet and not self.has_four_bet:
                                self.opponent_fold_three_bet += 1
                    self.num_wins += 1
                else:
                    # Out of position our raise is in this final packet.
                    for last_action in current_last_actions:
                        if 'RAISE' in last_action and self.name in last_action:
                            self.opponent_fold_pfr += 1
                            if self.has_three_bet and not self.has_four_bet:
                                self.opponent_fold_three_bet += 1
                    self.num_wins += 1
    elif self.current_game_state == 'FLOPTURN':
        for action in current_last_actions:
            if self.button:
                if 'FOLD' in action and self.opponent_name in action:
                    for last_action in self.last_actions[-1]:
                        if 'BET' in last_action and self.name in last_action:
                            self.opponent_fold_c_bet += 1
                    self.num_wins += 1
            else:
                if 'FOLD' in action and self.opponent_name in action:
                    for last_action in current_last_actions:
                        if 'BET' in last_action and self.name in last_action:
                            self.opponent_fold_c_bet += 1
                    self.num_wins += 1
    elif self.current_game_state == 'POSTRIVER':
        for action in current_last_actions:
            if 'WIN' in action:
                if self.name in action:
                    self.num_wins += 1
                    # A SHOW action alongside the WIN means showdown win.
                    for last_action in current_last_actions:
                        if 'SHOW' in last_action:
                            self.showdown += 1
                            self.showdown_win += 1
                            break
                break
| 43.298883
| 102
| 0.48055
| 1,701
| 15,501
| 4.100529
| 0.068783
| 0.098065
| 0.053333
| 0.048746
| 0.698351
| 0.629247
| 0.588387
| 0.535771
| 0.499211
| 0.493047
| 0
| 0.016226
| 0.451326
| 15,501
| 357
| 103
| 43.420168
| 0.80388
| 0.007741
| 0
| 0.657817
| 0
| 0
| 0.015936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014749
| false
| 0
| 0.0059
| 0
| 0.035398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e675c9e19056933d226c148a0c8e55351caf07f1
| 20,377
|
py
|
Python
|
examples/Tutorial/Example/app.py
|
DrewLazzeriKitware/trame
|
fdc73f07f17d2601e1b1d3934d2d6326a3c0281e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/Tutorial/Example/app.py
|
DrewLazzeriKitware/trame
|
fdc73f07f17d2601e1b1d3934d2d6326a3c0281e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/Tutorial/Example/app.py
|
DrewLazzeriKitware/trame
|
fdc73f07f17d2601e1b1d3934d2d6326a3c0281e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from trame import change, update_state
from trame.layouts import SinglePageWithDrawer
from trame.html import vtk, vuetify, widgets
from vtkmodules.vtkCommonDataModel import vtkDataObject
from vtkmodules.vtkFiltersCore import vtkContourFilter
from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridReader
from vtkmodules.vtkRenderingAnnotation import vtkCubeAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkDataSetMapper,
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
)
# Required for interacter factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
# Required for remote rendering factory initialization, not necessary for
# local rendering, but doesn't hurt to include it
import vtkmodules.vtkRenderingOpenGL2 # noqa
# Directory containing this script; data files are resolved relative to it.
CURRENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
class Representation:
    """Integer codes for the actor representation modes offered in the UI."""
    Points, Wireframe, Surface, SurfaceWithEdges = range(4)
class LookupTable:
    """Integer codes for the color-map presets offered in the UI."""
    Rainbow, Inverted_Rainbow, Greyscale, Inverted_Greyscale = range(4)
# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------

# One render window with a single renderer and trackball-style interaction.
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)

renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()

# Read Data
reader = vtkXMLUnstructuredGridReader()
reader.SetFileName(os.path.join(CURRENT_DIRECTORY, "../data/disk_out_ref.vtu"))
reader.Update()

# Extract Array/Field information from both point and cell data.
dataset_arrays = []
fields = [
    (reader.GetOutput().GetPointData(), vtkDataObject.FIELD_ASSOCIATION_POINTS),
    (reader.GetOutput().GetCellData(), vtkDataObject.FIELD_ASSOCIATION_CELLS),
]
for field in fields:
    field_arrays, association = field
    for i in range(field_arrays.GetNumberOfArrays()):
        array = field_arrays.GetArray(i)
        array_range = array.GetRange()
        # Each entry doubles as a Vuetify select item: "text" is the label,
        # "value" the array index within its field association.
        dataset_arrays.append(
            {
                "text": array.GetName(),
                "value": i,
                "range": list(array_range),
                "type": association,
            }
        )
default_array = dataset_arrays[0]
default_min, default_max = default_array.get("range")

# Mesh
mesh_mapper = vtkDataSetMapper()
mesh_mapper.SetInputConnection(reader.GetOutputPort())
mesh_actor = vtkActor()
mesh_actor.SetMapper(mesh_mapper)
renderer.AddActor(mesh_actor)

# Mesh: Setup default representation to surface
mesh_actor.GetProperty().SetRepresentationToSurface()
mesh_actor.GetProperty().SetPointSize(1)
mesh_actor.GetProperty().EdgeVisibilityOff()

# Mesh: Apply rainbow color map
mesh_lut = mesh_mapper.GetLookupTable()
mesh_lut.SetHueRange(0.666, 0.0)
mesh_lut.SetSaturationRange(1.0, 1.0)
mesh_lut.SetValueRange(1.0, 1.0)
mesh_lut.Build()

# Mesh: Color by default array
mesh_mapper.SelectColorArray(default_array.get("text"))
mesh_mapper.GetLookupTable().SetRange(default_min, default_max)
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
    mesh_mapper.SetScalarModeToUsePointFieldData()
else:
    mesh_mapper.SetScalarModeToUseCellFieldData()
mesh_mapper.SetScalarVisibility(True)
mesh_mapper.SetUseLookupTableScalarRange(True)

# Contour
contour = vtkContourFilter()
contour.SetInputConnection(reader.GetOutputPort())
contour_mapper = vtkDataSetMapper()
contour_mapper.SetInputConnection(contour.GetOutputPort())
contour_actor = vtkActor()
contour_actor.SetMapper(contour_mapper)
renderer.AddActor(contour_actor)

# Contour: ContourBy default array, seeded at the midpoint of its range.
contour_value = 0.5 * (default_max + default_min)
contour.SetInputArrayToProcess(
    0, 0, 0, default_array.get("type"), default_array.get("text")
)
contour.SetValue(0, contour_value)

# Contour: Setup default representation to surface
contour_actor.GetProperty().SetRepresentationToSurface()
contour_actor.GetProperty().SetPointSize(1)
contour_actor.GetProperty().EdgeVisibilityOff()

# Contour: Apply rainbow color map
contour_lut = contour_mapper.GetLookupTable()
contour_lut.SetHueRange(0.666, 0.0)
contour_lut.SetSaturationRange(1.0, 1.0)
contour_lut.SetValueRange(1.0, 1.0)
contour_lut.Build()

# Contour: Color by default array
contour_mapper.GetLookupTable().SetRange(default_min, default_max)
contour_mapper.SelectColorArray(default_array.get("text"))
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
    contour_mapper.SetScalarModeToUsePointFieldData()
else:
    contour_mapper.SetScalarModeToUseCellFieldData()
contour_mapper.SetScalarVisibility(True)
contour_mapper.SetUseLookupTableScalarRange(True)

# Cube Axes
cube_axes = vtkCubeAxesActor()
renderer.AddActor(cube_axes)

# Cube Axes: Boundaries, camera, and styling
cube_axes.SetBounds(mesh_actor.GetBounds())
cube_axes.SetCamera(renderer.GetActiveCamera())
cube_axes.SetXLabelFormat("%6.1f")
cube_axes.SetYLabelFormat("%6.1f")
cube_axes.SetZLabelFormat("%6.1f")
cube_axes.SetFlyModeToOuterEdges()

renderer.ResetCamera()

# -----------------------------------------------------------------------------
# trame Views
# -----------------------------------------------------------------------------

local_view = vtk.VtkLocalView(renderWindow)
# NOTE(review): interactive_ratio=(1,) is a one-element tuple; the trame
# tutorial passes the plain int 1 — confirm which the API expects.
remote_view = vtk.VtkRemoteView(renderWindow, interactive_ratio=(1,))
# The view currently wired into the layout (swapped by update_local_vs_remote).
html_view = local_view
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
def update_view(**kwargs):
    """Push the current scene to whichever HTML view is active."""
    html_view.update()
# -----------------------------------------------------------------------------
# Toolbar Callbacks
# -----------------------------------------------------------------------------
@change("cube_axes_visibility")
def update_cube_axes_visibility(cube_axes_visibility, **kwargs):
    """Toggle the cube-axes actor when the toolbar checkbox changes."""
    cube_axes.SetVisibility(cube_axes_visibility)
    update_view()
@change("local_vs_remote")
def update_local_vs_remote(local_vs_remote, **kwargs):
    """Swap between local and remote rendering when the toolbar toggle changes."""
    # Switch html_view
    global html_view
    if local_vs_remote:
        html_view = local_view
    else:
        html_view = remote_view

    # Update layout: replace the view widget inside the content container.
    layout.content.children[0].children[0] = html_view
    layout.flush_content()

    # Update View
    update_view()
# -----------------------------------------------------------------------------
# Representation Callbacks
# -----------------------------------------------------------------------------
def update_representation(actor, mode):
    """Apply the given Representation *mode* to *actor*'s display property."""
    prop = actor.GetProperty()
    # Per-mode settings: (representation setter, point size, show edges).
    settings = {
        Representation.Points: (prop.SetRepresentationToPoints, 5, False),
        Representation.Wireframe: (prop.SetRepresentationToWireframe, 1, False),
        Representation.Surface: (prop.SetRepresentationToSurface, 1, False),
        Representation.SurfaceWithEdges: (prop.SetRepresentationToSurface, 1, True),
    }
    if mode in settings:
        apply_representation, point_size, show_edges = settings[mode]
        apply_representation()
        prop.SetPointSize(point_size)
        if show_edges:
            prop.EdgeVisibilityOn()
        else:
            prop.EdgeVisibilityOff()
@change("mesh_representation")
def update_mesh_representation(mesh_representation, **kwargs):
    """Re-style the mesh actor when its representation dropdown changes."""
    update_representation(mesh_actor, mesh_representation)
    update_view()
@change("contour_representation")
def update_contour_representation(contour_representation, **kwargs):
    """Re-style the contour actor when its representation dropdown changes."""
    update_representation(contour_actor, contour_representation)
    update_view()
# -----------------------------------------------------------------------------
# ColorBy Callbacks
# -----------------------------------------------------------------------------
def color_by_array(actor, array):
    """Color *actor* by the dataset array described by *array*.

    *array* is a ``dataset_arrays`` entry: a dict holding the array name
    (``text``), scalar ``range`` and field association ``type``.
    """
    _min, _max = array.get("range")
    mapper = actor.GetMapper()
    mapper.SelectColorArray(array.get("text"))
    mapper.GetLookupTable().SetRange(_min, _max)
    # Bug fix: the original mutated the global mesh_mapper regardless of
    # which actor was passed, then unconditionally forced point-field data,
    # clobbering the cell/point branch. Use the actor's own mapper and
    # honor the array's association instead.
    if array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
        mapper.SetScalarModeToUsePointFieldData()
    else:
        mapper.SetScalarModeToUseCellFieldData()
    mapper.SetScalarVisibility(True)
    mapper.SetUseLookupTableScalarRange(True)
@change("mesh_color_array_idx")
def update_mesh_color_by_name(mesh_color_array_idx, **kwargs):
    """Recolor the mesh when its color-by dropdown selects another array."""
    array = dataset_arrays[mesh_color_array_idx]
    color_by_array(mesh_actor, array)
    update_view()
@change("contour_color_array_idx")
def update_contour_color_by_name(contour_color_array_idx, **kwargs):
    """Recolor the contour when its color-by dropdown selects another array."""
    array = dataset_arrays[contour_color_array_idx]
    color_by_array(contour_actor, array)
    update_view()
# -----------------------------------------------------------------------------
# ColorMap Callbacks
# -----------------------------------------------------------------------------
def use_preset(actor, preset):
    """Configure *actor*'s lookup table for the given LookupTable preset."""
    # Per-preset (hue range, saturation range, value range).
    preset_ranges = {
        LookupTable.Rainbow: ((0.666, 0.0), (1.0, 1.0), (1.0, 1.0)),
        LookupTable.Inverted_Rainbow: ((0.0, 0.666), (1.0, 1.0), (1.0, 1.0)),
        LookupTable.Greyscale: ((0.0, 0.0), (0.0, 0.0), (0.0, 1.0)),
        LookupTable.Inverted_Greyscale: ((0.0, 0.666), (0.0, 0.0), (1.0, 0.0)),
    }
    lut = actor.GetMapper().GetLookupTable()
    if preset in preset_ranges:
        hue, saturation, value = preset_ranges[preset]
        lut.SetHueRange(*hue)
        lut.SetSaturationRange(*saturation)
        lut.SetValueRange(*value)
    lut.Build()
@change("mesh_color_preset")
def update_mesh_color_preset(mesh_color_preset, **kwargs):
    """Switch the mesh's color map when its preset dropdown changes."""
    use_preset(mesh_actor, mesh_color_preset)
    update_view()
@change("contour_color_preset")
def update_contour_color_preset(contour_color_preset, **kwargs):
    """Switch the contour's color map when its preset dropdown changes."""
    use_preset(contour_actor, contour_color_preset)
    update_view()
# -----------------------------------------------------------------------------
# Opacity Callbacks
# -----------------------------------------------------------------------------
@change("mesh_opacity")
def update_mesh_opacity(mesh_opacity, **kwargs):
    """Apply the mesh opacity slider value to the mesh actor."""
    mesh_actor.GetProperty().SetOpacity(mesh_opacity)
    update_view()
@change("contour_opacity")
def update_contour_opacity(contour_opacity, **kwargs):
    """Apply the contour opacity slider value to the contour actor."""
    contour_actor.GetProperty().SetOpacity(contour_opacity)
    update_view()
# -----------------------------------------------------------------------------
# Contour Callbacks
# -----------------------------------------------------------------------------
@change("contour_by_array_idx")
def update_contour_by(contour_by_array_idx, **kwargs):
    """Contour a different array: reseed value/min/max/step and refresh."""
    array = dataset_arrays[contour_by_array_idx]
    contour_min, contour_max = array.get("range")
    # Slider step is 1% of the range; initial value is the midpoint.
    contour_step = 0.01 * (contour_max - contour_min)
    contour_value = 0.5 * (contour_max + contour_min)
    contour.SetInputArrayToProcess(0, 0, 0, array.get("type"), array.get("text"))
    contour.SetValue(0, contour_value)

    # Update UI
    update_state("contour_min", contour_min)
    update_state("contour_max", contour_max)
    update_state("contour_value", contour_value)
    update_state("contour_step", contour_step)

    # Update View
    update_view()
@change("contour_value")
def update_contour_value(contour_value, **kwargs):
    """Move the contour iso-value when its slider changes."""
    contour.SetValue(0, float(contour_value))
    update_view()
# -----------------------------------------------------------------------------
# Pipeline Widget Callbacks
# -----------------------------------------------------------------------------
# Selection Change
def actives_change(ids):
    """Show the settings card matching the pipeline-tree selection."""
    selected = ids[0]
    # Tree ids: "1" = Mesh, "2" = Contour; anything else hides both cards.
    card = {"1": "mesh", "2": "contour"}.get(selected, "nothing")
    update_state("active_ui", card)
# Visibility Change
def visibility_change(event):
    """Toggle the visibility of the actor named by a pipeline-tree event."""
    # Tree ids: "1" = Mesh, "2" = Contour.
    target = {"1": mesh_actor, "2": contour_actor}.get(event["id"])
    if target is not None:
        target.SetVisibility(event["visible"])
    update_view()
# -----------------------------------------------------------------------------
# GUI Toolbar Buttons
# -----------------------------------------------------------------------------
def standard_buttons():
    """Declare the toolbar toggles: axes, theme, local/remote, reset camera."""
    # Cube-axes visibility toggle.
    vuetify.VCheckbox(
        v_model=("cube_axes_visibility", True),
        on_icon="mdi-cube-outline",
        off_icon="mdi-cube-off-outline",
        classes="mx-1",
        hide_details=True,
        dense=True,
    )
    # Dark/light theme toggle (bound directly to Vuetify's theme flag).
    vuetify.VCheckbox(
        v_model="$vuetify.theme.dark",
        on_icon="mdi-lightbulb-off-outline",
        off_icon="mdi-lightbulb-outline",
        classes="mx-1",
        hide_details=True,
        dense=True,
    )
    # Local vs remote rendering toggle.
    vuetify.VCheckbox(
        v_model=("local_vs_remote", True),
        on_icon="mdi-lan-disconnect",
        off_icon="mdi-lan-connect",
        classes="mx-1",
        hide_details=True,
        dense=True,
    )
    # Reset-camera button (handled client-side on the view ref).
    with vuetify.VBtn(icon=True, click="$refs.view.resetCamera()"):
        vuetify.VIcon("mdi-crop-free")
# -----------------------------------------------------------------------------
# GUI Pipelines Widget
# -----------------------------------------------------------------------------
def pipeline_widget():
    """Declare the pipeline browser tree (Mesh -> Contour) and its callbacks."""
    widgets.GitTree(
        sources=(
            "pipeline",
            [
                {"id": "1", "parent": "0", "visible": 1, "name": "Mesh"},
                {"id": "2", "parent": "1", "visible": 1, "name": "Contour"},
            ],
        ),
        actives_change=(actives_change, "[$event]"),
        visibility_change=(visibility_change, "[$event]"),
    )
# -----------------------------------------------------------------------------
# GUI Cards
# -----------------------------------------------------------------------------
def ui_card(title, ui_name):
    """Create a card shown only while *ui_name* is the active UI.

    Returns the VCardText container the caller fills with controls.
    """
    with vuetify.VCard(v_show=f"active_ui == '{ui_name}'"):
        vuetify.VCardTitle(
            title,
            classes="grey lighten-1 py-1 grey--text text--darken-3",
            style="user-select: none; cursor: pointer",
            hide_details=True,
            dense=True,
        )
        content = vuetify.VCardText(classes="py-2")
    return content
def mesh_card():
    """Declare the drawer card with the mesh representation/color/opacity controls."""
    with ui_card(title="Mesh", ui_name="mesh"):
        # Representation selector.
        vuetify.VSelect(
            v_model=("mesh_representation", Representation.Surface),
            items=(
                "representations",
                [
                    {"text": "Points", "value": 0},
                    {"text": "Wireframe", "value": 1},
                    {"text": "Surface", "value": 2},
                    {"text": "SurfaceWithEdges", "value": 3},
                ],
            ),
            label="Representation",
            hide_details=True,
            dense=True,
            outlined=True,
            classes="pt-1",
        )
        with vuetify.VRow(classes="pt-2", dense=True):
            with vuetify.VCol(cols="6"):
                # Color-by-array selector.
                vuetify.VSelect(
                    label="Color by",
                    v_model=("mesh_color_array_idx", 0),
                    items=("array_list", dataset_arrays),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
            with vuetify.VCol(cols="6"):
                # Color-map preset selector.
                vuetify.VSelect(
                    label="Colormap",
                    v_model=("mesh_color_preset", LookupTable.Rainbow),
                    items=(
                        "colormaps",
                        [
                            {"text": "Rainbow", "value": 0},
                            {"text": "Inv Rainbow", "value": 1},
                            {"text": "Greyscale", "value": 2},
                            {"text": "Inv Greyscale", "value": 3},
                        ],
                    ),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
        # Opacity slider.
        vuetify.VSlider(
            v_model=("mesh_opacity", 1.0),
            min=0,
            max=1,
            step=0.1,
            label="Opacity",
            classes="mt-1",
            hide_details=True,
            dense=True,
        )
def contour_card():
    """Declare the drawer card with the contour controls (array, value, style)."""
    with ui_card(title="Contour", ui_name="contour"):
        # Which array to contour.
        vuetify.VSelect(
            label="Contour by",
            v_model=("contour_by_array_idx", 0),
            items=("array_list", dataset_arrays),
            hide_details=True,
            dense=True,
            outlined=True,
            classes="pt-1",
        )
        # Iso-value slider; bounds/step are updated by update_contour_by.
        vuetify.VSlider(
            v_model=("contour_value", contour_value),
            min=("contour_min", default_min),
            max=("contour_max", default_max),
            step=("contour_step", 0.01 * (default_max - default_min)),
            label="Value",
            classes="my-1",
            hide_details=True,
            dense=True,
        )
        # Representation selector.
        vuetify.VSelect(
            v_model=("contour_representation", Representation.Surface),
            items=(
                "representations",
                [
                    {"text": "Points", "value": 0},
                    {"text": "Wireframe", "value": 1},
                    {"text": "Surface", "value": 2},
                    {"text": "SurfaceWithEdges", "value": 3},
                ],
            ),
            label="Representation",
            hide_details=True,
            dense=True,
            outlined=True,
            classes="pt-1",
        )
        with vuetify.VRow(classes="pt-2", dense=True):
            with vuetify.VCol(cols="6"):
                # Color-by-array selector.
                vuetify.VSelect(
                    label="Color by",
                    v_model=("contour_color_array_idx", 0),
                    items=("array_list", dataset_arrays),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
            with vuetify.VCol(cols="6"):
                # Color-map preset selector.
                vuetify.VSelect(
                    label="Colormap",
                    v_model=("contour_color_preset", LookupTable.Rainbow),
                    items=(
                        "colormaps",
                        [
                            {"text": "Rainbow", "value": 0},
                            {"text": "Inv Rainbow", "value": 1},
                            {"text": "Greyscale", "value": 2},
                            {"text": "Inv Greyscale", "value": 3},
                        ],
                    ),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
        # Opacity slider.
        vuetify.VSlider(
            v_model=("contour_opacity", 1.0),
            min=0,
            max=1,
            step=0.1,
            label="Opacity",
            classes="mt-1",
            hide_details=True,
            dense=True,
        )
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
# Assemble the single-page-with-drawer layout; render once the page is ready.
layout = SinglePageWithDrawer("Viewer", on_ready=update_view)
layout.title.set_text("Viewer")

with layout.toolbar:
    # toolbar components
    vuetify.VSpacer()
    vuetify.VDivider(vertical=True, classes="mx-2")
    standard_buttons()

with layout.drawer as drawer:
    # drawer components
    drawer.width = 325
    pipeline_widget()
    vuetify.VDivider(classes="mb-2")
    mesh_card()
    contour_card()

with layout.content:
    # content components
    vuetify.VContainer(
        fluid=True,
        classes="pa-0 fill-height",
        children=[html_view],
    )

# State use to track active ui card
layout.state = {
    "active_ui": None,
}

# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------

if __name__ == "__main__":
    layout.start()
| 31.739875
| 81
| 0.55332
| 1,855
| 20,377
| 5.863073
| 0.162803
| 0.00423
| 0.019309
| 0.025745
| 0.334958
| 0.297812
| 0.250736
| 0.224255
| 0.190051
| 0.184075
| 0
| 0.01246
| 0.224125
| 20,377
| 641
| 82
| 31.789392
| 0.675459
| 0.171517
| 0
| 0.360262
| 0
| 0
| 0.097857
| 0.010952
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050218
| false
| 0
| 0.024017
| 0
| 0.098253
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6773e141755afe2a0e2167251aa0bc85bd1863f
| 2,849
|
py
|
Python
|
webots_ros2_tutorials/webots_ros2_tutorials/master.py
|
AleBurzio11/webots_ros2
|
99fa4a1a9d467e4ba71eff17ddf4e82444c78938
|
[
"Apache-2.0"
] | 1
|
2021-09-09T13:11:15.000Z
|
2021-09-09T13:11:15.000Z
|
webots_ros2_tutorials/webots_ros2_tutorials/master.py
|
fmrico/webots_ros2
|
38d88e01fe174a8a00731f554f1a8646b9127bd2
|
[
"Apache-2.0"
] | 1
|
2021-07-08T08:29:26.000Z
|
2021-10-01T07:57:12.000Z
|
webots_ros2_tutorials/webots_ros2_tutorials/master.py
|
fmrico/webots_ros2
|
38d88e01fe174a8a00731f554f1a8646b9127bd2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1996-2021 Soft_illusion.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class LineFollower(Node):
    """ROS 2 node that follows a line using three ground IR sensors.

    Subscribes to right/mid/left IR readings and publishes a ``Twist`` on
    ``cmd_vel``; steering is proportional to the right-left difference.
    """

    def __init__(self):
        super().__init__('linefollower_cmdvel')
        # Subscribe Infra Red sensors
        self.subs_right_ir = self.create_subscription(
            Float64, 'right_IR', self.right_infrared_callback, 1)
        self.subs_left_ir = self.create_subscription(
            Float64, 'left_IR', self.left_infrared_callback, 1)
        self.subs_mid_ir = self.create_subscription(
            Float64, 'mid_IR', self.mid_infrared_callback, 1)

        # Publish cmd vel
        self.pubs_cmdvel = self.create_publisher(Twist, 'cmd_vel', 1)

        # vehicle parameters
        self.speed = 0.2
        self.angle_correction = 0.01

        # Initialize parameters
        self.ground_right, self.ground_mid, self.ground_left = 0, 0, 0
        self.delta = 0
        self.cmd = Twist()
        self.stop = False
        # count: consecutive samples with all sensors off the line.
        self.count = 0
        self.count_threshold = 10

    def lineFollowingModule(self):
        """Compute and publish the steering command from the latest readings."""
        # Constant velocity
        self.cmd.linear.x = self.speed

        # Correction parameters
        self.delta = self.ground_right - self.ground_left
        self.cmd.angular.z = self.angle_correction*self.delta

        # Logic for stop if black line not seen .
        # NOTE(review): readings > 500 on all three sensors are treated as
        # "line lost" — confirm against the sensor's actual value range.
        if self.ground_right > 500 and self.ground_left > 500 and self.ground_mid > 500:
            self.count += 1
        else:
            self.count = 0

        if self.count > self.count_threshold:
            self.stop = True

        if self.stop:
            self.cmd.linear.x = 0.0
            self.cmd.angular.z = 0.0

        # Publish cmd vel
        self.pubs_cmdvel.publish(self.cmd)
        self.stop = False

    # Call backs to update sensor reading variables
    def right_infrared_callback(self, msg):
        # The right sensor drives the control update for every sample.
        self.ground_right = msg.data
        self.lineFollowingModule()

    def left_infrared_callback(self, msg):
        self.ground_left = msg.data

    def mid_infrared_callback(self, msg):
        self.ground_mid = msg.data
def main(args=None):
    """Initialize rclpy, spin the LineFollower node, then shut down cleanly."""
    rclpy.init(args=args)
    ls = LineFollower()
    rclpy.spin(ls)
    ls.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| 29.677083
| 88
| 0.657073
| 383
| 2,849
| 4.720627
| 0.375979
| 0.060841
| 0.033186
| 0.039823
| 0.191372
| 0.084624
| 0
| 0
| 0
| 0
| 0
| 0.024125
| 0.257985
| 2,849
| 95
| 89
| 29.989474
| 0.831126
| 0.274833
| 0
| 0.076923
| 0
| 0
| 0.026921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e67851bbe8e0d15c96340d34374c9950c15106d4
| 13,892
|
py
|
Python
|
favorite_files.py
|
jasondavis/FavoriteFiles
|
be088259ac36383399eebe85d8d5b35e235d25b0
|
[
"MIT",
"Unlicense"
] | 1
|
2019-04-27T20:13:19.000Z
|
2019-04-27T20:13:19.000Z
|
favorite_files.py
|
jasondavis/FavoriteFiles
|
be088259ac36383399eebe85d8d5b35e235d25b0
|
[
"MIT",
"Unlicense"
] | null | null | null |
favorite_files.py
|
jasondavis/FavoriteFiles
|
be088259ac36383399eebe85d8d5b35e235d25b0
|
[
"MIT",
"Unlicense"
] | null | null | null |
'''
Favorite Files
Licensed under MIT
Copyright (c) 2012 Isaac Muse <isaacmuse@gmail.com>
'''
import sublime
import sublime_plugin
from os.path import join, exists, normpath
from favorites import Favorites
# Shared favorites store, persisted in the user's favorite_files_list.json.
Favs = Favorites(join(sublime.packages_path(), 'User', 'favorite_files_list.json'))
class Refresh:
    """Module-level flags tracking the plugin's refresh state."""
    # Dummy file used for refresh cycles — assumes the plugin lives under
    # Packages/FavoriteFiles (TODO confirm for package renames).
    dummy_file = normpath(join(sublime.packages_path(), 'FavoriteFiles', 'refresh.txt'))
    on = False
class CleanOrphanedFavoritesCommand(sublime_plugin.WindowCommand):
    """Window command that drops favorites whose files no longer exist."""

    def run(self):
        # Clean out all dead links; if the first load reports stale data,
        # force a reload and clean again.
        if not Favs.load(clean=True, win_id=self.window.id()):
            Favs.load(force=True, clean=True, win_id=self.window.id())
class SelectFavoriteFileCommand(sublime_plugin.WindowCommand):
    """Quick-panel UI to open favorite files, individually or by group."""

    def open_file(self, value, group=False):
        """Handle a quick-panel selection.

        ``value`` is the selected panel index (-1 on cancel).  Opens one
        file, every file in a group, or descends into a group listing.
        """
        if value >= 0:
            active_group = self.window.active_group()
            if value < self.num_files or (group and value < self.num_files + 1):
                # Open global file, file in group, or all files in group
                names = []
                if group:
                    if value == 0:
                        # Open all files in group
                        names = [self.files[x][1] for x in range(0, self.num_files)]
                    else:
                        # Open file in group
                        names.append(self.files[value - 1][1])
                else:
                    # Open global file
                    names.append(self.files[value][1])

                # Iterate through file list ensure they load in proper view index order
                count = 0
                for n in names:
                    if exists(n):
                        view = self.window.open_file(n)
                        # Bug fix (idiom): compare to None with `is not`,
                        # not `!=` (PEP 8 / E711).
                        if view is not None:
                            if active_group >= 0:
                                self.window.set_view_index(view, active_group, count)
                            count += 1
                    else:
                        sublime.error_message("The following file does not exist:\n%s" % n)
            else:
                # Decend into group
                value -= self.num_files
                self.files = Favs.all_files(group_name=self.groups[value][0].replace("Group: ", "", 1))
                self.num_files = len(self.files)
                self.groups = []
                self.num_groups = 0

                # Show files in group
                if self.num_files:
                    self.window.show_quick_panel(
                        ["Open Group"] + self.files,
                        lambda x: self.open_file(x, group=True)
                    )
                else:
                    sublime.error_message("No favorites found! Try adding some.")

    def run(self):
        """Load favorites and present files plus groups in a quick panel."""
        if not Favs.load(win_id=self.window.id()):
            self.files = Favs.all_files()
            self.num_files = len(self.files)
            self.groups = Favs.all_groups()
            self.num_groups = len(self.groups)
            if self.num_files + self.num_groups > 0:
                self.window.show_quick_panel(
                    self.files + self.groups,
                    self.open_file
                )
            else:
                sublime.error_message("No favorites found! Try adding some.")
class AddFavoriteFileCommand(sublime_plugin.WindowCommand):
    """Add the current file, all open files, or the active layout group's
    files to favorites, optionally under a named group."""

    def add(self, names, group_name=None):
        """Add each path in ``names`` to favorites (globally or under ``group_name``).

        Paths that do not exist on disk are skipped and reported to the user.
        """
        disk_omit_count = 0
        added = 0
        # Iterate names and add them to group/global if not already added
        for n in names:
            if not Favs.exists(n, group_name=group_name):
                if exists(n):
                    Favs.set(n, group_name=group_name)
                    added += 1
                else:
                    # File does not exist on disk; cannot add
                    disk_omit_count += 1
        if added:
            # Save only if files were actually added
            Favs.save(True)
        if disk_omit_count:
            # Alert that some files could NOT be added
            message = (
                "1 file does not exist on disk!" if disk_omit_count == 1
                else "%d file(s) do not exist on disk!" % disk_omit_count
            )
            sublime.error_message(message)

    def create_group(self, value):
        """Input-panel callback: create group ``value`` and add pending files to it."""
        repeat = False
        if value == "":
            # Require an actual name
            sublime.error_message("Please provide a valid group name.")
            repeat = True
        elif Favs.exists(value, group=True):
            # Do not allow duplicates.
            # Fix: the original omitted the "% value" argument, so users saw
            # the literal text 'Group "%s" already exists.'
            sublime.error_message("Group \"%s\" already exists." % value)
            repeat = True
        else:
            # Add group, then the pending file names
            Favs.add_group(value)
            self.add(self.name, value)
        if repeat:
            # Ask again if the name was not sufficient
            v = self.window.show_input_panel(
                "Create Group: ",
                "New Group",
                self.create_group,
                None,
                None
            )
            v.run_command("select_all")

    def select_group(self, value, replace=False):
        """Quick-panel callback: add pending files to the chosen group."""
        if value >= 0:
            group_name = self.groups[value][0].replace("Group: ", "", 1)
            if replace:
                # Start with an empty group for a "Replace Group" selection
                Favs.add_group(group_name)
            # Add favorites
            self.add(self.name, group_name)

    def show_groups(self, replace=False):
        """Show the available groups in a quick panel."""
        self.groups = Favs.all_groups()
        self.window.show_quick_panel(
            self.groups,
            lambda x: self.select_group(x, replace=replace)
        )

    def group_answer(self, value):
        """Quick-panel callback for the options shown by group_prompt."""
        if value >= 0:
            if value == 0:
                # No group; add file(s) to the global favorites
                self.add(self.name)
            elif value == 1:
                # Request a new group name
                v = self.window.show_input_panel(
                    "Create Group: ",
                    "New Group",
                    self.create_group,
                    None,
                    None
                )
                v.run_command("select_all")
            elif value == 2:
                # "Add to Group"
                self.show_groups()
            elif value == 3:
                # "Replace Group"
                self.show_groups(replace=True)

    def group_prompt(self):
        """Present group options (none/create/add/replace) in a quick panel."""
        # Default options
        self.group = ["No Group", "Create Group"]
        if Favs.group_count() > 0:
            # Extra options only make sense when groups already exist
            self.group += ["Add to Group", "Replace Group"]
        # Present group options
        self.window.show_quick_panel(
            self.group,
            self.group_answer
        )

    def file_answer(self, value):
        """Quick-panel callback for the file options shown by file_prompt."""
        if value >= 0:
            view = self.window.active_view()
            if view is not None:
                if value == 0:
                    # Single file
                    name = view.file_name()
                    if name is not None:
                        self.name.append(name)
                        self.group_prompt()
                elif value == 1:
                    # All files in the window
                    views = self.window.views()
                    if len(views) > 0:
                        for v in views:
                            name = v.file_name()
                            if name is not None:
                                self.name.append(name)
                        if len(self.name) > 0:
                            self.group_prompt()
                elif value == 2:
                    # All files in the active layout group
                    group, idx = self.window.get_view_index(view)
                    views = self.window.views_in_group(group)
                    if len(views) > 0:
                        for v in views:
                            name = v.file_name()
                            if name is not None:
                                self.name.append(name)
                        if len(self.name) > 0:
                            self.group_prompt()

    def file_prompt(self, view_code):
        """Present file options based on how many views/layout groups exist."""
        # Always offer the current active file
        options = ["Add Current File to Favorites"]
        if view_code > 0:
            # Offer all files in the window
            options.append("Add All Files to Favorites")
        if view_code > 1:
            # Offer all files in the active layout group.
            # Fix: the message previously read "Add All Files to in Active Group".
            options.append("Add All Files in Active Group to Favorites")
        # Present file options
        self.window.show_quick_panel(
            options,
            self.file_answer
        )

    def run(self):
        view = self.window.active_view()
        self.name = []
        if view is not None:
            view_code = 0
            views = self.window.views()
            # If there is more than one view open allow saving all views
            # TODO: Widget views probably show up here too, maybe look into excluding them
            if len(views) > 1:
                view_code = 1
                # See if there is more than one group; if so allow saving of a specific group
                if self.window.num_groups() > 1:
                    group, idx = self.window.get_view_index(view)
                    group_views = self.window.views_in_group(group)
                    if len(group_views) > 1:
                        view_code = 2
                self.file_prompt(view_code)
            else:
                # Only a single file open; proceed without file options
                name = view.file_name()
                if name is not None:
                    self.name.append(name)
                    self.group_prompt()
class RemoveFavoriteFileCommand(sublime_plugin.WindowCommand):
    """Remove a favorite file, a file from a group, or a whole group via quick panel."""

    def remove(self, value, group=False, group_name=None):
        # Quick-panel callback.
        #   value: selected index; negative means the panel was cancelled.
        #   group: True when showing a group's contents, where index 0 is
        #          the synthetic "Remove Group" entry.
        #   group_name: name of the group currently shown, if any.
        if value >= 0:
            # Remove file from global, file from group list, or entire group
            if value < self.num_files or (group and value < self.num_files + 1):
                name = None
                if group:
                    if group_name == None:
                        return
                    if value == 0:
                        # Remove the entire group
                        Favs.remove_group(group_name)
                        Favs.save(True)
                        return
                    else:
                        # Remove group file (offset by the "Remove Group" entry)
                        name = self.files[value - 1][1]
                else:
                    # Remove global file
                    name = self.files[value][1]
                # Remove file and save
                Favs.remove(name, group_name=group_name)
                Favs.save(True)
            else:
                # Descend into the selected group: re-show the panel with the
                # group's files (group entries follow files in the list).
                value -= self.num_files
                group_name = self.groups[value][0].replace("Group: ", "", 1)
                self.files = Favs.all_files(group_name=group_name)
                self.num_files = len(self.files)
                self.groups = []
                self.num_groups = 0
                # Show group files
                if self.num_files:
                    self.window.show_quick_panel(
                        ["Remove Group"] + self.files,
                        lambda x: self.remove(x, group=True, group_name=group_name)
                    )
                else:
                    sublime.error_message("No favorites found! Try adding some.")

    def run(self):
        # NOTE(review): a falsy return from load() appears to mean success —
        # consistent with the other commands in this file; confirm in favorites.py.
        if not Favs.load(win_id=self.window.id()):
            # Present both files and groups for removal
            self.files = Favs.all_files()
            self.num_files = len(self.files)
            self.groups = Favs.all_groups()
            self.num_groups = len(self.groups)
            # Show panel
            if self.num_files + self.num_groups > 0:
                self.window.show_quick_panel(
                    self.files + self.groups,
                    self.remove
                )
            else:
                sublime.error_message("No favorites to remove!")
class FavoritesForceRefreshListenerCommand(sublime_plugin.EventListener):
    """Watches for the dummy refresh file being saved and re-runs the toggle."""

    def on_post_save(self, view):
        # Ignore saves entirely unless a refresh cycle is in progress.
        if not Refresh.on:
            return
        saved_path = view.file_name()
        if saved_path is None:
            return
        if normpath(view.file_name()) != Refresh.dummy_file:
            return
        # Close the refresh file if more than one view is open
        if len(view.window().views()) > 1:
            sublime.set_timeout(
                lambda: sublime.active_window().run_command("close_file"), 100
            )
        # Attempt the per-project toggle again
        sublime.set_timeout(
            lambda: sublime.active_window().run_command("toggle_per_project_favorites"), 1000
        )
class TogglePerProjectFavoritesCommand(sublime_plugin.WindowCommand):
    """Toggle this window between global and per-project favorites.

    Uses the Refresh dummy-file trick: a throwaway file is opened and saved,
    and the post-save listener re-invokes this command once Sublime settles.
    """

    def save(self, view):
        # Deferred callback: save the dummy refresh file once it is open so
        # that FavoritesForceRefreshListenerCommand.on_post_save fires.
        if Refresh.on:
            path = view.file_name()
            if path != None:
                if normpath(view.file_name()) == Refresh.dummy_file:
                    view.run_command('save')

    def run(self):
        refresh = True
        win_id = self.window.id()
        if Refresh.on:
            # A refresh cycle was in progress; this is the re-invocation.
            Refresh.on = False
            refresh = False
            # Try and toggle back to global first
            if not Favs.toggle_global(win_id):
                return
        # Try and toggle per project
        if refresh:
            # Start the refresh cycle via the dummy file save trick.
            view = self.window.open_file(Refresh.dummy_file)
            if view != None:
                Refresh.on = True
                self.window.focus_view(view)
                sublime.set_timeout(lambda: self.save(view), 100)
            else:
                sublime.error_message('Could not find a project file!')
        else:
            # NOTE(review): a truthy return from toggle_per_projects appears
            # to signal failure — consistent with load()/toggle_global usage
            # in this file; confirm in favorites.py.
            if Favs.toggle_per_projects(win_id):
                sublime.error_message('Could not find a project file!')
            else:
                Favs.open(win_id=self.window.id())

    def is_enabled(self):
        # Feature-flagged: command is hidden unless enabled in settings.
        return sublime.load_settings("favorite_files.sublime-settings").get("enable_per_projects", False)
| 37.444744
| 136
| 0.500864
| 1,532
| 13,892
| 4.413185
| 0.140339
| 0.042893
| 0.026623
| 0.019672
| 0.450081
| 0.391066
| 0.315338
| 0.301435
| 0.281467
| 0.239018
| 0
| 0.008734
| 0.414843
| 13,892
| 370
| 137
| 37.545946
| 0.822979
| 0.115678
| 0
| 0.512727
| 0
| 0
| 0.061008
| 0.006788
| 0
| 0
| 0
| 0.002703
| 0
| 1
| 0.065455
| false
| 0
| 0.014545
| 0.003636
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e679989ea74254d7fd372bced3748665b5351845
| 4,361
|
py
|
Python
|
sc2clanman/views.py
|
paskausks/sc2cm
|
9c80e581933531496333d4a54c40174d4fb583a5
|
[
"MIT"
] | null | null | null |
sc2clanman/views.py
|
paskausks/sc2cm
|
9c80e581933531496333d4a54c40174d4fb583a5
|
[
"MIT"
] | null | null | null |
sc2clanman/views.py
|
paskausks/sc2cm
|
9c80e581933531496333d4a54c40174d4fb583a5
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
from collections import Counter
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.db import models as dm
from django.shortcuts import get_object_or_404, render
from django.views.generic.list import BaseListView
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from . import models, apps, sc2, mixins
class BaseView(TemplateView):
    """
    A TemplateView subclass which adds the Opts object to context.
    """
    current_model = 'clanmember'

    def get_context_data(self, **kwargs):
        context = super(BaseView, self).get_context_data(**kwargs)

        # Get links so we can display links to admin.
        class Opts(object):
            app_label = 'sc2clanman'
            model_name = self.current_model

        context['opts'] = Opts()
        requesting_user = self.request.user
        context['is_authorized'] = requesting_user.is_superuser or requesting_user.is_staff
        return context
class AuthenticatedView(BaseView):
    """
    BaseView subclass with the login required decorator applied.
    """
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # login_required runs before any HTTP-method handler, so anonymous
        # users are redirected to the login page for every verb.
        return super(AuthenticatedView, self).dispatch(*args, **kwargs)
class ListView(BaseListView, BaseView):
    """
    Combines BaseView with the capability to show a paginated object list.

    BaseListView supplies the object-list/pagination machinery; BaseView
    supplies the admin-opts context.
    """
    pass
class MemberView(ListView):
    """Show the clanmembers in a list ordered by ladder score."""
    template_name = 'sc2clanman/members.html'
    # No ordering since it's done by the front-end
    queryset = models.ClanMember.clanmembers.all()

    def get_context_data(self, **kwargs):
        ctx = super(MemberView, self).get_context_data(**kwargs)
        # Timestamps of the most recent successful sync runs.
        # NOTE(review): indexing [0] raises IndexError when no successful
        # sync has ever been logged — presumably syncs run before this view
        # is reachable; confirm.
        ctx['last_member_update'] = models.SyncLog.objects.filter(
            action=models.SyncLog.CLAN_MEMBER_SYNC,
            success=True,
        ).order_by('-time')[0].time
        ctx['last_detail_update'] = models.SyncLog.objects.filter(
            action=models.SyncLog.CLAN_MEMBER_DETAIL_SYNC,
            success=True
        ).order_by('-time')[0].time

        # Calculate quick stats
        # Game stats - aggregate and sum wins and losses.
        # Fix: Sum() yields None on an empty queryset, which made this
        # addition raise TypeError; default each side to 0.
        gp = self.queryset.aggregate(dm.Sum('wins'), dm.Sum('losses'))
        ctx['total_games_played'] = (gp['wins__sum'] or 0) + (gp['losses__sum'] or 0)

        # Annotate games played and winrate for each member
        games_played = self.queryset.annotate(
            games_played=dm.F('wins') + dm.F('losses')
        ).order_by('games_played')
        ctx['least_games_played'] = games_played.filter(games_played__gt=0).first()
        ctx['most_games_played'] = games_played.order_by('-games_played').first()

        # Last game date
        ctx['least_passionate'] = self.queryset.order_by('last_game').first()

        # Most prominent league, country and race
        league_breakdown = Counter(
            self.queryset.exclude(score=models.ClanMember.SCORE_UNRANKED).values_list('league', flat=True)
        ).most_common()
        ctx['league_breakdown'] = (
            (sc2.League(league), count) for league, count in league_breakdown
        )
        ctx['country_breakdown'] = Counter(
            self.queryset.exclude(country='').values_list('country', flat=True)
        ).most_common()
        race_breakdown = Counter(
            self.queryset.exclude(score=models.ClanMember.SCORE_UNRANKED).values_list('race', flat=True)
        ).most_common(4)
        ctx['race_breakdown'] = (
            (sc2.Race(race), count) for race, count in race_breakdown
        )
        ctx['version'] = apps.ClanManConfig.version_id
        return ctx
class ClanWarView(BaseView):
    """List every recorded clan war."""
    template_name = 'sc2clanman/cw.html'
    current_model = 'clanwar'

    def get_context_data(self, **kwargs):
        context = super(ClanWarView, self).get_context_data(**kwargs)
        context['clanwars'] = models.ClanWar.objects.all()
        return context
class ClanWarDetailView(BaseView):
    """Detail page for a single clan war, looked up by the cw_id URL kwarg."""
    template_name = 'sc2clanman/cwdetail.html'
    current_model = 'clanwar'

    def get_context_data(self, **kwargs):
        context = super(ClanWarDetailView, self).get_context_data(**kwargs)
        # 404s when the id is unknown rather than raising DoesNotExist.
        context['cw'] = get_object_or_404(models.ClanWar, id=kwargs.get('cw_id'))
        context['clan_tag'] = settings.SC2_CLANMANAGER_CLAN_TAG
        return context
| 33.037879
| 110
| 0.661546
| 532
| 4,361
| 5.244361
| 0.315789
| 0.043369
| 0.040143
| 0.024373
| 0.239427
| 0.21828
| 0.189247
| 0.189247
| 0.141935
| 0.141935
| 0
| 0.006807
| 0.225178
| 4,361
| 131
| 111
| 33.290076
| 0.818881
| 0.120385
| 0
| 0.179487
| 0
| 0
| 0.105639
| 0.012444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064103
| false
| 0.025641
| 0.115385
| 0.012821
| 0.423077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e67c1789de35ce33eb29e291ba0e431b4c1c574b
| 4,002
|
py
|
Python
|
tacker/api/v1/resource.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
tacker/api/v1/resource.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
tacker/api/v1/resource.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
from oslo_log import log as logging
import webob.dec
from tacker.api import api_common
from tacker import wsgi
LOG = logging.getLogger(__name__)
class Request(wsgi.Request):
    # Thin subclass of tacker.wsgi.Request with no added behavior; used as
    # the RequestClass for the webob decorator in Resource() below.
    pass
def Resource(controller, faults=None, deserializers=None, serializers=None):
    """API entity resource.

    Represents an API entity resource and the associated serialization and
    deserialization logic.

    :param controller: object whose attribute named after the routed action
        is called to handle the request.
    :param faults: optional mapping passed to the exception-to-HTTP converter.
    :param deserializers: optional {content-type: deserializer} overrides.
    :param serializers: optional {content-type: serializer} overrides.
    :returns: a WSGI application (webob-wrapped callable).
    """
    default_deserializers = {'application/json': wsgi.JSONDeserializer()}
    default_serializers = {'application/json': wsgi.JSONDictSerializer()}
    format_types = {'json': 'application/json'}
    # Success codes that differ from the default 200.
    action_status = dict(create=201, delete=204)
    # Caller-supplied (de)serializers override the JSON defaults per type.
    default_deserializers.update(deserializers or {})
    default_serializers.update(serializers or {})
    deserializers = default_deserializers
    serializers = default_serializers
    faults = faults or {}

    @webob.dec.wsgify(RequestClass=Request)
    def resource(request):
        # Routing arguments placed in the environ by the router
        # (wsgiorg.routing_args convention).
        route_args = request.environ.get('wsgiorg.routing_args')
        if route_args:
            args = route_args[1].copy()
        else:
            args = {}
        # NOTE(jkoelker) by now the controller is already found, remove
        # it from the args if it is in the matchdict
        args.pop('controller', None)
        fmt = args.pop('format', None)
        action = args.pop('action', None)
        # An explicit format suffix wins over Accept-header negotiation.
        content_type = format_types.get(fmt,
                                        request.best_match_content_type())
        language = request.best_match_language()
        deserializer = deserializers.get(content_type)
        serializer = serializers.get(content_type)
        try:
            if request.body:
                args['body'] = deserializer.deserialize(request.body)['body']
            method = getattr(controller, action)
            result = method(request=request, **args)
        except Exception as e:
            # Map any failure onto an HTTP exception via the faults table.
            # NOTE(review): "_" is presumably oslo.i18n's gettext installed
            # package-wide — it is not imported in this module; confirm.
            mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
                                                                  language)
            if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
                # Client errors are expected; log at info, no traceback.
                LOG.info(_('%(action)s failed (client error): %(exc)s'),
                         {'action': action, 'exc': mapped_exc})
            else:
                # Server-side failures get the full traceback.
                LOG.exception(
                    _('%(action)s failed: %(details)s'),
                    {
                        'action': action,
                        'details': extract_exc_details(e),
                    }
                )
            raise mapped_exc
        status = action_status.get(action, 200)
        body = serializer.serialize(result)
        # NOTE(jkoelker) Comply with RFC2616 section 9.7
        if status == 204:
            content_type = ''
            body = None
        return webob.Response(request=request, status=status,
                              content_type=content_type,
                              body=body)
    return resource
# Sentinel meaning "message has no interpolation arguments".
_NO_ARGS_MARKER = object()


def extract_exc_details(e):
    """Return the error-context message attached to *e*, interpolated.

    Falls back to a generic message when the exception carries no
    ``_error_context_msg`` / ``_error_context_args`` attributes.
    """
    try:
        details = e._error_context_msg
        args = e._error_context_args
    except AttributeError:
        return _('No details.')
    if args is _NO_ARGS_MARKER:
        return details
    return details % args
| 33.630252
| 78
| 0.613193
| 449
| 4,002
| 5.318486
| 0.400891
| 0.032245
| 0.010888
| 0.0134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011761
| 0.298851
| 4,002
| 118
| 79
| 33.915254
| 0.839273
| 0.235382
| 0
| 0.028169
| 0
| 0
| 0.081842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0.014085
| 0.056338
| 0
| 0.183099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e67c30a42d5e25d4e6e974aeebd81a4f702b3cd2
| 5,417
|
py
|
Python
|
akinator/utils.py
|
GitHubEmploy/akinator.py
|
67c688b0332f4caa72bacc8fbc8f95abfe2290c9
|
[
"MIT"
] | null | null | null |
akinator/utils.py
|
GitHubEmploy/akinator.py
|
67c688b0332f4caa72bacc8fbc8f95abfe2290c9
|
[
"MIT"
] | null | null | null |
akinator/utils.py
|
GitHubEmploy/akinator.py
|
67c688b0332f4caa72bacc8fbc8f95abfe2290c9
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2019 NinjaSnail1080
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .exceptions import InvalidAnswerError, InvalidLanguageError, AkiConnectionFailure, AkiTimedOut, AkiNoQuestions, AkiServerDown, AkiTechnicalError
import re
import json
def ans_to_id(ans):
    """Convert an input answer string into an Answer ID for Akinator"""
    normalized = str(ans).lower()
    # Every accepted spelling, mapped to Akinator's numeric answer ID.
    answer_ids = {
        "yes": "0", "y": "0", "0": "0",
        "no": "1", "n": "1", "1": "1",
        "i": "2", "idk": "2", "i dont know": "2", "i don't know": "2", "2": "2",
        "probably": "3", "p": "3", "3": "3",
        "probably not": "4", "pn": "4", "4": "4",
    }
    if normalized in answer_ids:
        return answer_ids[normalized]
    raise InvalidAnswerError("""
You put "{}", which is an invalid answer.
The answer must be one of these:
- "yes" OR "y" OR "0" for YES
- "no" OR "n" OR "1" for NO
- "i" OR "idk" OR "i dont know" OR "i don't know" OR "2" for I DON'T KNOW
- "probably" OR "p" OR "3" for PROBABLY
- "probably not" OR "pn" OR "4" for PROBABLY NOT
""".format(normalized))
def get_lang_and_theme(lang=None):
    """Returns the language code and theme based on what is input"""
    # Alias table: every accepted spelling -> (language code, theme).
    # Themes: "c" = characters, "a" = animals, "o" = objects.
    aliases = {
        None: ("en", "c"),
        "en": ("en", "c"), "english": ("en", "c"),
        "en_animals": ("en", "a"), "english_animals": ("en", "a"),
        "en_objects": ("en", "o"), "english_objects": ("en", "o"),
        "ar": ("ar", "c"), "arabic": ("ar", "c"),
        "cn": ("cn", "c"), "chinese": ("cn", "c"),
        "de": ("de", "c"), "german": ("de", "c"),
        "de_animals": ("de", "a"), "german_animals": ("de", "a"),
        "es": ("es", "c"), "spanish": ("es", "c"),
        "es_animals": ("es", "a"), "spanish_animals": ("es", "a"),
        "fr": ("fr", "c"), "french": ("fr", "c"),
        "fr_animals": ("fr", "a"), "french_animals": ("fr", "a"),
        "fr_objects": ("fr", "o"), "french_objects": ("fr", "o"),
        "il": ("il", "c"), "hebrew": ("il", "c"),
        "it": ("it", "c"), "italian": ("it", "c"),
        "it_animals": ("it", "a"), "italian_animals": ("it", "a"),
        "jp": ("jp", "c"), "japanese": ("jp", "c"),
        "jp_animals": ("jp", "a"), "japanese_animals": ("jp", "a"),
        "kr": ("kr", "c"), "korean": ("kr", "c"),
        "nl": ("nl", "c"), "dutch": ("nl", "c"),
        "pl": ("pl", "c"), "polish": ("pl", "c"),
        "pt": ("pt", "c"), "portuguese": ("pt", "c"),
        "ru": ("ru", "c"), "russian": ("ru", "c"),
        "tr": ("tr", "c"), "turkish": ("tr", "c"),
    }
    if lang in aliases:
        code, theme = aliases[lang]
        return {"lang": code, "theme": theme}
    raise InvalidLanguageError("You put \"{}\", which is an invalid language.".format(lang))
def raise_connection_error(response):
    """Raise the proper error if the API failed to connect"""
    # Known server responses mapped to the exception class and message to raise.
    known_failures = {
        "KO - SERVER DOWN": (
            AkiServerDown,
            "Akinator's servers are down in this region. Try again later or use a different language",
        ),
        "KO - TECHNICAL ERROR": (
            AkiTechnicalError,
            "Akinator's servers have had a technical error. Try again later or use a different language",
        ),
        "KO - TIMEOUT": (
            AkiTimedOut,
            "Your Akinator session has timed out",
        ),
        "KO - ELEM LIST IS EMPTY": (
            AkiNoQuestions,
            "\"Akinator.step\" reached 80. No more questions",
        ),
        "WARN - NO QUESTION": (
            AkiNoQuestions,
            "\"Akinator.step\" reached 80. No more questions",
        ),
    }
    if response in known_failures:
        error_class, message = known_failures[response]
        raise error_class(message)
    raise AkiConnectionFailure("An unknown error has occured. Server response: {}".format(response))
| 44.04065
| 149
| 0.606055
| 747
| 5,417
| 4.364123
| 0.305221
| 0.044172
| 0.042945
| 0.060123
| 0.080368
| 0.044172
| 0.030675
| 0.030675
| 0.030675
| 0.030675
| 0
| 0.006101
| 0.243493
| 5,417
| 122
| 150
| 44.401639
| 0.789409
| 0.229463
| 0
| 0.035294
| 0
| 0.011765
| 0.355095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.035294
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|