hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0ae7c18938f65d603fdc5ccf0fe562cc30757c2
| 40
|
py
|
Python
|
tests/__init__.py
|
fabianSorn/semantic_text_similarity
|
0caba4797b81bc3b7b5647ac128cf31a263f2aaf
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
fabianSorn/semantic_text_similarity
|
0caba4797b81bc3b7b5647ac128cf31a263f2aaf
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
fabianSorn/semantic_text_similarity
|
0caba4797b81bc3b7b5647ac128cf31a263f2aaf
|
[
"MIT"
] | null | null | null |
"""Unit test package for semtextsim."""
| 20
| 39
| 0.7
| 5
| 40
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 1
| 40
| 40
| 0.8
| 0.825
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e0b466cb2eba7f7b4fe51e2e9ab96616ac3425d5
| 70
|
py
|
Python
|
scintillant/controllers/__init__.py
|
PaperDevil/scintillant
|
369eb4e9613e21d436af8d5cdbd07d91632766a7
|
[
"Apache-2.0"
] | null | null | null |
scintillant/controllers/__init__.py
|
PaperDevil/scintillant
|
369eb4e9613e21d436af8d5cdbd07d91632766a7
|
[
"Apache-2.0"
] | null | null | null |
scintillant/controllers/__init__.py
|
PaperDevil/scintillant
|
369eb4e9613e21d436af8d5cdbd07d91632766a7
|
[
"Apache-2.0"
] | null | null | null |
from scintillant.controllers.context_controller import ContextUpdater
| 35
| 69
| 0.914286
| 7
| 70
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 70
| 1
| 70
| 70
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e0b92158609dc05e7f0caf8674364b3b2477ec93
| 916
|
py
|
Python
|
ENIAC/api/__init__.py
|
Ahrli/fast_tools
|
144d764e4f169d3ab3753dcc6a79db9f9449de59
|
[
"Apache-2.0"
] | 1
|
2021-12-11T16:33:47.000Z
|
2021-12-11T16:33:47.000Z
|
ENIAC/api/__init__.py
|
webclinic017/fast_tools
|
144d764e4f169d3ab3753dcc6a79db9f9449de59
|
[
"Apache-2.0"
] | null | null | null |
ENIAC/api/__init__.py
|
webclinic017/fast_tools
|
144d764e4f169d3ab3753dcc6a79db9f9449de59
|
[
"Apache-2.0"
] | 3
|
2021-11-22T09:46:43.000Z
|
2022-01-28T22:33:07.000Z
|
# api/__init__.py
from sanic import Blueprint
# 蓝图
# from .eniac_bps.factors import factor
# from .eniac_bps.loopback import loop
# from .eniac_bps.backtrader import backtrader
# from .eniac_bps.btresultApi import api
# from .eniac_bps.piplineStatus import pipline
# from .eniac_bps.trading import trading
# from .eniac_bps.ai import ai
# from .eniac_bps.validation import validation
# from .eniac_bps.loop_coin import loopcoin
# from .eniac_bps.financial import financial
# # from sanic_openapi import swagger_blueprint, openapi_blueprint
#
# from .loop_statistics import loop_indicators
# from .loop_stack import loop_indicators
# iquant_eniac = Blueprint.group(factor, loop, backtrader, api, pipline, trading,ai,validation, loopcoin,financial)
# eniac = Blueprint.group(factor, loop, swagger_blueprint, openapi_blueprint, url_prefix='/')
from .eniac_bps.car_info import car
iquant_eniac = Blueprint.group(car)
| 38.166667
| 115
| 0.804585
| 126
| 916
| 5.619048
| 0.277778
| 0.139831
| 0.186441
| 0.090395
| 0.081921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113537
| 916
| 24
| 116
| 38.166667
| 0.871921
| 0.847162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
e0ebfe8d9a54e504ec6c64f065c0eefb1e72dc27
| 107
|
py
|
Python
|
bob/pipelines/config/distributed/local_parallel.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | 1
|
2020-10-13T19:58:44.000Z
|
2020-10-13T19:58:44.000Z
|
bob/pipelines/config/distributed/local_parallel.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | null | null | null |
bob/pipelines/config/distributed/local_parallel.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | null | null | null |
from bob.pipelines.distributed import get_local_parallel_client
dask_client = get_local_parallel_client()
| 26.75
| 63
| 0.878505
| 15
| 107
| 5.8
| 0.666667
| 0.183908
| 0.367816
| 0.505747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 107
| 3
| 64
| 35.666667
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4618b4718858aad80c72a2029c2b1e1c265b6990
| 306
|
py
|
Python
|
reamber/dummy/__init__.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
reamber/dummy/__init__.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
reamber/dummy/__init__.py
|
Bestfast/reamberPy
|
91b76ca6adf11fbe8b7cee7c186481776a4d7aaa
|
[
"MIT"
] | null | null | null |
from reamber.dummy.DmBpm import DmBpm
from reamber.dummy.DmHit import DmHit
from reamber.dummy.DmHold import DmHold
from reamber.dummy.DmMap import DmMap
from reamber.dummy.DmMapMeta import DmMapMeta
from reamber.dummy.DmSv import DmSv
__all__ = ['DmBpm', 'DmHit', 'DmHold', 'DmMap', 'DmMapMeta', 'DmSv']
| 34
| 68
| 0.787582
| 43
| 306
| 5.511628
| 0.255814
| 0.278481
| 0.405063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107843
| 306
| 8
| 69
| 38.25
| 0.868132
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1cb66cf33c42d1635a1a15ff6b8f76d546c04d2d
| 32
|
py
|
Python
|
test/run/t60.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
test/run/t60.py
|
csev/skulpt
|
9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
test/run/t60.py
|
csev/skulpt
|
9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
if not "?" in "xyz": print "OK"
| 16
| 31
| 0.53125
| 6
| 32
| 2.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 1
| 32
| 32
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1cfe75cf3778a5714bb9a45a95de3d68688708f8
| 201
|
py
|
Python
|
dddm/samplers/__init__.py
|
JoranAngevaare/dddm
|
3461e37984bac4d850beafecc9d1881b84fb226c
|
[
"MIT"
] | null | null | null |
dddm/samplers/__init__.py
|
JoranAngevaare/dddm
|
3461e37984bac4d850beafecc9d1881b84fb226c
|
[
"MIT"
] | 85
|
2021-09-20T12:08:53.000Z
|
2022-03-30T12:48:06.000Z
|
dddm/samplers/__init__.py
|
JoranAngevaare/dddm
|
3461e37984bac4d850beafecc9d1881b84fb226c
|
[
"MIT"
] | null | null | null |
from . import emcee
from .emcee import *
from . import nestle
from .nestle import *
from . import pymultinest
from .pymultinest import *
from .multi_detectors import *
from . import multi_detectors
| 16.75
| 30
| 0.766169
| 26
| 201
| 5.846154
| 0.269231
| 0.263158
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174129
| 201
| 11
| 31
| 18.272727
| 0.915663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e80ae07d8d706641a0024783b60fe1d4244fde50
| 66
|
py
|
Python
|
apollo_ad/__init__.py
|
connorcapitolo/apollo_ad
|
6a56845ae299789b84fe3235da847ab31180855e
|
[
"MIT"
] | 1
|
2021-01-29T20:47:35.000Z
|
2021-01-29T20:47:35.000Z
|
apollo_ad/__init__.py
|
connorcapitolo/apollo_ad
|
6a56845ae299789b84fe3235da847ab31180855e
|
[
"MIT"
] | null | null | null |
apollo_ad/__init__.py
|
connorcapitolo/apollo_ad
|
6a56845ae299789b84fe3235da847ab31180855e
|
[
"MIT"
] | 2
|
2021-01-29T20:47:45.000Z
|
2021-10-03T13:06:58.000Z
|
from .apollo_ad import *
from .UI import UI
from .demo import demo
| 22
| 24
| 0.772727
| 12
| 66
| 4.166667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 66
| 3
| 25
| 22
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e81fbbd2b23821fc0c078a6c0ab96898d6e6529d
| 4,316
|
py
|
Python
|
lemur/tests/test_users.py
|
caiges/lemur
|
376b2b80517fb7f24fa505461d11bffeafefc713
|
[
"Apache-2.0"
] | null | null | null |
lemur/tests/test_users.py
|
caiges/lemur
|
376b2b80517fb7f24fa505461d11bffeafefc713
|
[
"Apache-2.0"
] | 1
|
2022-03-29T22:05:53.000Z
|
2022-03-29T22:05:53.000Z
|
lemur/tests/test_users.py
|
TinLe/lemur
|
dfb9e3a0c8f8f1f1bd908b1fcb8596af7c65f739
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
from lemur.tests.factories import UserFactory, RoleFactory
from lemur.users.views import * # noqa
from .vectors import VALID_ADMIN_HEADER_TOKEN, VALID_USER_HEADER_TOKEN
def test_user_input_schema(client):
from lemur.users.schemas import UserInputSchema
input_data = {
'username': 'example',
'password': '1233432',
'email': 'example@example.com'
}
data, errors = UserInputSchema().load(input_data)
assert not errors
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_user_get(client, token, status):
assert client.get(api.url_for(Users, user_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_user_post_(client, token, status):
assert client.post(api.url_for(Users, user_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_user_put(client, token, status):
assert client.put(api.url_for(Users, user_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_user_delete(client, token, status):
assert client.delete(api.url_for(Users, user_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_user_patch(client, token, status):
assert client.patch(api.url_for(Users, user_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_user_list_post_(client, token, status):
assert client.post(api.url_for(UsersList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_user_list_get(client, token, status):
assert client.get(api.url_for(UsersList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_user_list_delete(client, token, status):
assert client.delete(api.url_for(UsersList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_user_list_patch(client, token, status):
assert client.patch(api.url_for(UsersList), data={}, headers=token).status_code == status
def test_sensitive_filter(client):
resp = client.get(api.url_for(UsersList) + '?filter=password;a', headers=VALID_ADMIN_HEADER_TOKEN)
assert "'password' is not sortable or filterable" in resp.json['message']
def test_sensitive_sort(client):
resp = client.get(api.url_for(UsersList) + '?sortBy=password&sortDir=asc', headers=VALID_ADMIN_HEADER_TOKEN)
assert "'password' is not sortable or filterable" in resp.json['message']
def test_user_role_changes(client, session):
user = UserFactory()
role1 = RoleFactory()
role2 = RoleFactory()
session.flush()
data = {
'active': True,
'id': user.id,
'username': user.username,
'email': user.email,
'roles': [
{'id': role1.id},
{'id': role2.id},
],
}
# PUT two roles
resp = client.put(api.url_for(Users, user_id=user.id), data=json.dumps(data), headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
assert len(resp.json['roles']) == 2
assert set(user.roles) == {role1, role2}
# Remove one role and PUT again
del data['roles'][1]
resp = client.put(api.url_for(Users, user_id=user.id), data=json.dumps(data), headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
assert len(resp.json['roles']) == 1
assert set(user.roles) == {role1}
| 29.972222
| 115
| 0.690222
| 581
| 4,316
| 4.888124
| 0.156627
| 0.104577
| 0.078873
| 0.103521
| 0.773944
| 0.746831
| 0.746831
| 0.746831
| 0.717606
| 0.717606
| 0
| 0.030227
| 0.164504
| 4,316
| 143
| 116
| 30.181818
| 0.757349
| 0.011121
| 0
| 0.476636
| 0
| 0
| 0.081379
| 0.006567
| 0
| 0
| 0
| 0
| 0.168224
| 1
| 0.121495
| false
| 0.046729
| 0.056075
| 0
| 0.17757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e837adb2e61c10b6c61070cb958bcf04f0e67e2c
| 222
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowVlanFilter/cli/equal/golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowVlanFilter/cli/equal/golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowVlanFilter/cli/equal/golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"vlan_id": {
"100": {"access_map_tag": "karim"},
"3": {"access_map_tag": "mordred"},
"15": {"access_map_tag": "mordred"},
"5": {"access_map_tag": "mordred"},
}
}
| 24.666667
| 44
| 0.5
| 24
| 222
| 4.208333
| 0.541667
| 0.356436
| 0.475248
| 0.564356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042945
| 0.265766
| 222
| 8
| 45
| 27.75
| 0.576687
| 0
| 0
| 0
| 0
| 0
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1c0a8e9a6d73b236560d63e413673b60a9ca7854
| 137
|
py
|
Python
|
desafio021.py
|
WebertiBarbosa/python
|
640a70c327c262d4e867a4b4620ca50d42398c00
|
[
"MIT"
] | null | null | null |
desafio021.py
|
WebertiBarbosa/python
|
640a70c327c262d4e867a4b4620ca50d42398c00
|
[
"MIT"
] | 1
|
2020-06-06T21:34:44.000Z
|
2020-06-06T21:44:58.000Z
|
desafio021.py
|
WebertiBarbosa/python
|
640a70c327c262d4e867a4b4620ca50d42398c00
|
[
"MIT"
] | null | null | null |
import pygame
pygame.mixer.init()
pygame.mixer.music.load('ex021.mp3')
pygame.mixer.music.play()
#pygame.event.wait()
input('Agora sim')
| 19.571429
| 36
| 0.751825
| 21
| 137
| 4.904762
| 0.666667
| 0.320388
| 0.31068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031008
| 0.058394
| 137
| 6
| 37
| 22.833333
| 0.767442
| 0.138686
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1c4adcf7e64b4d92e082b9ba0e6857c359ce8ed3
| 115
|
py
|
Python
|
ex030.py
|
Roninho514/Treinamento-Python
|
fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6
|
[
"MIT"
] | null | null | null |
ex030.py
|
Roninho514/Treinamento-Python
|
fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6
|
[
"MIT"
] | null | null | null |
ex030.py
|
Roninho514/Treinamento-Python
|
fc6ad0b64fb3dc3cfa5381f8fc53b5b3243a7ff6
|
[
"MIT"
] | null | null | null |
numero = int(input('Digite um número:'))
print('Esse número é par' if numero % 2 == 0 else 'Esse número é impa7r')
| 38.333333
| 73
| 0.678261
| 20
| 115
| 3.9
| 0.75
| 0.25641
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031579
| 0.173913
| 115
| 2
| 74
| 57.5
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.469565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1c7939a542a02d3eb204d675ac62e0ab9ada58f9
| 275
|
py
|
Python
|
example/scan.py
|
Hong-Xiang/pitem
|
5ab08a5e085f97cde096b680953fbee4f775ed8c
|
[
"Apache-2.0"
] | null | null | null |
example/scan.py
|
Hong-Xiang/pitem
|
5ab08a5e085f97cde096b680953fbee4f775ed8c
|
[
"Apache-2.0"
] | null | null | null |
example/scan.py
|
Hong-Xiang/pitem
|
5ab08a5e085f97cde096b680953fbee4f775ed8c
|
[
"Apache-2.0"
] | null | null | null |
# This file is generated via pitem, DO NOT EDIT
import attrs
@attr.s
class ScanItem:
id: attrs.ib(type=int)
begin_position: attrs.ib(type=float)
end_position: attrs.ib(type=float)
is_denoise: attrs.ib(type=float, validator=is_in_range(0.000000, 1.000000))
| 22.916667
| 79
| 0.723636
| 46
| 275
| 4.217391
| 0.652174
| 0.14433
| 0.226804
| 0.247423
| 0.247423
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.16
| 275
| 11
| 80
| 25
| 0.779221
| 0.163636
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
1c7bb9f1675178ac870e3a65a1d2da0a675995b9
| 43
|
py
|
Python
|
example_data/yes03.py
|
Cogmob/epr
|
7217c978afd2af87cce06db0ad1e0ab7e395ee9d
|
[
"MIT"
] | null | null | null |
example_data/yes03.py
|
Cogmob/epr
|
7217c978afd2af87cce06db0ad1e0ab7e395ee9d
|
[
"MIT"
] | null | null | null |
example_data/yes03.py
|
Cogmob/epr
|
7217c978afd2af87cce06db0ad1e0ab7e395ee9d
|
[
"MIT"
] | null | null | null |
epr( jsd kfjaksjdkfjaksjdkfjaksdjkfjaksjdf
| 21.5
| 42
| 0.906977
| 3
| 43
| 13
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 43
| 1
| 43
| 43
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
98c82d8ad45244a99725605e54e230077e6e15ca
| 168
|
py
|
Python
|
street_food_api/trucks/managers.py
|
ImustAdmit/Street-food-api
|
7c232304379c558c1250d906536f367b6e890e76
|
[
"MIT"
] | 1
|
2021-01-26T04:56:05.000Z
|
2021-01-26T04:56:05.000Z
|
street_food_api/trucks/managers.py
|
ImustAdmit/Street-food-api
|
7c232304379c558c1250d906536f367b6e890e76
|
[
"MIT"
] | null | null | null |
street_food_api/trucks/managers.py
|
ImustAdmit/Street-food-api
|
7c232304379c558c1250d906536f367b6e890e76
|
[
"MIT"
] | null | null | null |
from django.db import models
class ConfirmedTruckManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_confirmed=True)
| 24
| 63
| 0.755952
| 21
| 168
| 5.904762
| 0.857143
| 0.177419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 168
| 6
| 64
| 28
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
98d42c4673a4e695362e57430f0ae0b23f0c30e4
| 151
|
py
|
Python
|
dashboard/overview/views.py
|
colinspear/music-dashboard
|
35a7c082a8a6dab4637152adce5e9921a2984a97
|
[
"FTL"
] | null | null | null |
dashboard/overview/views.py
|
colinspear/music-dashboard
|
35a7c082a8a6dab4637152adce5e9921a2984a97
|
[
"FTL"
] | 5
|
2020-03-24T17:21:56.000Z
|
2021-03-17T21:23:28.000Z
|
dashboard/overview/views.py
|
colinspear/music-dashboard
|
35a7c082a8a6dab4637152adce5e9921a2984a97
|
[
"FTL"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def i_exist(request):
return HttpResponse('Indeed, this view exists')
| 16.777778
| 51
| 0.781457
| 20
| 151
| 5.85
| 0.8
| 0.17094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152318
| 151
| 8
| 52
| 18.875
| 0.914063
| 0
| 0
| 0
| 0
| 0
| 0.161074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
98dfafe821513600b628a434f1ae38d505be517f
| 1,772
|
py
|
Python
|
tests/bash/grep.py
|
Mamatu/pysh
|
1bacd6d18a1c40dfcb7ebdcc69711256bf6b9b3a
|
[
"MIT"
] | null | null | null |
tests/bash/grep.py
|
Mamatu/pysh
|
1bacd6d18a1c40dfcb7ebdcc69711256bf6b9b3a
|
[
"MIT"
] | null | null | null |
tests/bash/grep.py
|
Mamatu/pysh
|
1bacd6d18a1c40dfcb7ebdcc69711256bf6b9b3a
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import pytest
from pysh.bash.grep import *
from pysh import core, shells
from pysh.tests import test
class GrepTests(TestCase):
def test_notmatch_exit1_1(self):
test.inShell(lambda: inText("testa").match("test").endBool("exit 0", "exit 1"), 1)
def test_notmatch_exit1_2(self):
test.inShell(lambda: inText("textlongechotwo").match("echo").endBool("exit 0", "exit 1"), 1)
def test_notmatch_exit1_3(self):
test.inShell(lambda: inText("textlongechotwo").match("echo1").endBool("exit 0", "exit 1"), 1)
def test_match_exit0_1(self):
test.inShell(lambda: inText("test").match("test").endBool("exit 0", "exit 1"), 0)
def test_match_exit0_2(self):
test.inShell(lambda: inText("echo").match("ech[a-z]").endBool("exit 0", "exit 1"), 0)
def test_match_exit0_3(self):
test.inShell(lambda: inText("coredump").match("core.*").endBool("exit 0", "exit 1"), 0)
def test_notcontain_exit1_1(self):
test.inShell(lambda: inText("tesa").contain("test").endBool("exit 0", "exit 1"), 1)
def test_notcontain_exit1_2(self):
test.inShell(lambda: inText("textlongechtwo").contain("tst").endBool("exit 0", "exit 1"), 1)
def test_notcontain_exit1_3(self):
test.inShell(lambda: inText("textlongechtwo").contain("echo1").endBool("exit 0", "exit 1"), 1)
def test_contain_exit0_1(self):
test.inShell(lambda: inText("testa").contain("test").endBool("exit 0", "exit 1"), 0)
def test_contain_exit0_2(self):
test.inShell(lambda: inText("textlongecho12").contain("ech[a-z]").endBool("exit 0", "exit 1"), 0)
def test_contain_exit0_3(self):
test.inShell(lambda: inText("coredump loop cat").contain("core").endBool("exit 0", "exit 1"), 0)
| 53.69697
| 105
| 0.672122
| 258
| 1,772
| 4.476744
| 0.174419
| 0.072727
| 0.155844
| 0.218182
| 0.800866
| 0.800866
| 0.800866
| 0.418182
| 0.334199
| 0.268398
| 0
| 0.042468
| 0.149549
| 1,772
| 32
| 106
| 55.375
| 0.723955
| 0
| 0
| 0
| 0
| 0
| 0.181716
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.166667
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
98f08b2635043ba01adc2ce87b41a555cd8aadd7
| 199
|
py
|
Python
|
tests/test_dags_script.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dags_script.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dags_script.py
|
nleguillarme/inteGraph
|
65faae4b7c16977094c387f6359980a4e99f94cb
|
[
"Apache-2.0"
] | null | null | null |
from airflow.models.dagbag import DagBag
def test_import_dags():
dags = DagBag()
print("DAG import failures. Errors: {}".format(dags.import_errors))
assert len(dags.import_errors) == 0
| 24.875
| 71
| 0.713568
| 27
| 199
| 5.111111
| 0.592593
| 0.144928
| 0.231884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005988
| 0.160804
| 199
| 7
| 72
| 28.428571
| 0.820359
| 0
| 0
| 0
| 0
| 0
| 0.155779
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.8
| 0
| 1
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c710ff8ddfb3eb4e9d6e8b51ad5c8baae30db38d
| 158
|
py
|
Python
|
datanator_rest_api/spec/__init__.py
|
KarrLab/datanator_rest_api
|
eeadf30703329fbc8feeb9b96db1b78b54a8ccce
|
[
"MIT"
] | null | null | null |
datanator_rest_api/spec/__init__.py
|
KarrLab/datanator_rest_api
|
eeadf30703329fbc8feeb9b96db1b78b54a8ccce
|
[
"MIT"
] | 130
|
2019-08-22T22:29:05.000Z
|
2020-12-02T15:32:23.000Z
|
datanator_rest_api/spec/__init__.py
|
KarrLab/datanator_rest_api
|
eeadf30703329fbc8feeb9b96db1b78b54a8ccce
|
[
"MIT"
] | null | null | null |
""" API init
:Author: Bilal Shaikh <bilalshaikh42@gmail.com>
:Date: 2019-08-16
:Copyright: 2019, Karr Lab
:License: MIT
"""
from .SpecUtils import SpecUtils
| 17.555556
| 47
| 0.727848
| 22
| 158
| 5.227273
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10219
| 0.132911
| 158
| 8
| 48
| 19.75
| 0.737226
| 0.734177
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c729517630f536cdeb70f466c58d5b7694116344
| 515
|
py
|
Python
|
common/metrics.py
|
m-bizhani/Digital-rock-image-processing
|
4d3914dcfa1f814b953e6ce7e97a198f861f8e3a
|
[
"MIT"
] | null | null | null |
common/metrics.py
|
m-bizhani/Digital-rock-image-processing
|
4d3914dcfa1f814b953e6ce7e97a198f861f8e3a
|
[
"MIT"
] | null | null | null |
common/metrics.py
|
m-bizhani/Digital-rock-image-processing
|
4d3914dcfa1f814b953e6ce7e97a198f861f8e3a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
def PSNR(y_true, y_pred, max_val=1.0):
    """Peak signal-to-noise ratio between two image batches.

    Args:
        y_true: reference images.
        y_pred: reconstructed images.
        max_val: dynamic range of the pixel values. Defaults to 1.0 (images
            normalised to [0, 1]); previously hard-coded, now a keyword
            argument so the metric still works with the two-argument
            (y_true, y_pred) calling convention.

    Returns:
        The PSNR value(s) as computed by ``tf.image.psnr``.
    """
    return tf.image.psnr(y_true, y_pred, max_val=max_val)
def ssim(y_true, y_pred, max_val=1.0):
    """Structural similarity (SSIM) between two image batches.

    The filter and stability constants (filter_size=11, filter_sigma=1.5,
    k1=0.01, k2=0.03) are passed explicitly and match tf.image.ssim's
    documented defaults.

    Args:
        y_true: reference images.
        y_pred: reconstructed images.
        max_val: dynamic range of the pixel values. Defaults to 1.0
            (previously hard-coded), so existing two-argument callers
            are unaffected.

    Returns:
        The SSIM value(s) as computed by ``tf.image.ssim``.
    """
    return tf.image.ssim(y_true, y_pred, max_val=max_val,
                         filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
def mssim(y_true, y_pred, max_val=1.0):
    """Multi-scale structural similarity (MS-SSIM) between two image batches.

    Uses filter_size=8, smaller than the single-scale ssim() above —
    presumably so the Gaussian window still fits after the repeated
    downsampling MS-SSIM performs (TODO confirm against input image sizes).

    Args:
        y_true: reference images.
        y_pred: reconstructed images.
        max_val: dynamic range of the pixel values. Defaults to 1.0
            (previously hard-coded), keeping the two-argument metric
            signature backward-compatible.

    Returns:
        The MS-SSIM value(s) as computed by ``tf.image.ssim_multiscale``.
    """
    return tf.image.ssim_multiscale(
        y_true, y_pred, max_val=max_val, filter_size=8,
        filter_sigma=1.5, k1=0.01, k2=0.03)
| 30.294118
| 112
| 0.625243
| 97
| 515
| 3.051546
| 0.298969
| 0.141892
| 0.121622
| 0.202703
| 0.807432
| 0.756757
| 0.685811
| 0.608108
| 0.608108
| 0.608108
| 0
| 0.075718
| 0.256311
| 515
| 16
| 113
| 32.1875
| 0.697128
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.083333
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c729878274666c8a59a63612852502334789acb8
| 330
|
py
|
Python
|
scattertext/viz/__init__.py
|
laugustyniak/scattertext
|
95d865091ccb35d1798d650e737e401707a4b5af
|
[
"Apache-2.0"
] | null | null | null |
scattertext/viz/__init__.py
|
laugustyniak/scattertext
|
95d865091ccb35d1798d650e737e401707a4b5af
|
[
"Apache-2.0"
] | null | null | null |
scattertext/viz/__init__.py
|
laugustyniak/scattertext
|
95d865091ccb35d1798d650e737e401707a4b5af
|
[
"Apache-2.0"
] | null | null | null |
from .ScatterplotStructure import ScatterplotStructure
from .BasicHTMLFromScatterplotStructure import BasicHTMLFromScatterplotStructure
from scattertext.viz.PairPlotFromScattertextSctructure import PairPlotFromScatterplotStructure
from .VizDataAdapter import VizDataAdapter
from .HTMLSemioticSquareViz import HTMLSemioticSquareViz
| 66
| 94
| 0.924242
| 22
| 330
| 13.863636
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057576
| 330
| 5
| 95
| 66
| 0.980707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c737ccf9a9465649ccdab76b5acd96d8f58f4328
| 3,824
|
gyp
|
Python
|
src/third_party/harfbuzz/harfbuzz.gyp
|
bdon/fontview
|
6e3d9835dafe69d19b1eb185dc9acb9bd01ce708
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/harfbuzz/harfbuzz.gyp
|
bdon/fontview
|
6e3d9835dafe69d19b1eb185dc9acb9bd01ce708
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/harfbuzz/harfbuzz.gyp
|
bdon/fontview
|
6e3d9835dafe69d19b1eb185dc9acb9bd01ce708
|
[
"Apache-2.0"
] | 1
|
2022-01-14T10:20:29.000Z
|
2022-01-14T10:20:29.000Z
|
{
  # Build HarfBuzz as a static library; shared build settings come from common.gypi.
  'includes': ['../../common.gypi'],
  'targets': [
    {
      'target_name': 'harfbuzz',
      'type': 'static_library',
      'defines': [
        'HAVE_FREETYPE',
        'HAVE_FT_GET_VAR_BLEND_COORDINATES',
        'HAVE_INTEL_ATOMIC_PRIMITIVES',
        'HAVE_OT',
        'HAVE_UCDN',
      ],
      'sources': [
        'harfbuzz/src/hb-blob.cc',
        'harfbuzz/src/hb-buffer-serialize.cc',
        'harfbuzz/src/hb-buffer.cc',
        'harfbuzz/src/hb-common.cc',
        # Platform- and library-specific backends below are deliberately
        # excluded from the build (kept commented for reference).
        #'harfbuzz/src/hb-coretext.cc',
        #'harfbuzz/src/hb-directwrite.cc',
        'harfbuzz/src/hb-face.cc',
        #'harfbuzz/src/hb-fallback-shape.cc',
        'harfbuzz/src/hb-font.cc',
        'harfbuzz/src/hb-ft.cc',
        #'harfbuzz/src/hb-glib.cc',
        #'harfbuzz/src/hb-gobject-structs.cc',
        #'harfbuzz/src/hb-graphite2.cc',
        #'harfbuzz/src/hb-icu.cc',
        'harfbuzz/src/hb-ot-font.cc',
        'harfbuzz/src/hb-ot-layout.cc',
        'harfbuzz/src/hb-ot-map.cc',
        'harfbuzz/src/hb-ot-var.cc',
        'harfbuzz/src/hb-ot-shape-complex-arabic.cc',
        'harfbuzz/src/hb-ot-shape-complex-default.cc',
        'harfbuzz/src/hb-ot-shape-complex-hangul.cc',
        'harfbuzz/src/hb-ot-shape-complex-hebrew.cc',
        'harfbuzz/src/hb-ot-shape-complex-indic-table.cc',
        'harfbuzz/src/hb-ot-shape-complex-indic.cc',
        'harfbuzz/src/hb-ot-shape-complex-khmer.cc',
        'harfbuzz/src/hb-ot-shape-complex-myanmar.cc',
        'harfbuzz/src/hb-ot-shape-complex-thai.cc',
        'harfbuzz/src/hb-ot-shape-complex-tibetan.cc',
        'harfbuzz/src/hb-ot-shape-complex-use-table.cc',
        'harfbuzz/src/hb-ot-shape-complex-use.cc',
        'harfbuzz/src/hb-ot-shape-fallback.cc',
        'harfbuzz/src/hb-ot-shape-normalize.cc',
        'harfbuzz/src/hb-ot-shape.cc',
        'harfbuzz/src/hb-ot-tag.cc',
        'harfbuzz/src/hb-set.cc',
        'harfbuzz/src/hb-shape-plan.cc',
        'harfbuzz/src/hb-shape.cc',
        'harfbuzz/src/hb-shaper.cc',
        'harfbuzz/src/hb-ucdn.cc',
        #'harfbuzz/src/hb-ucdn/ucdn.c',
        'harfbuzz/src/hb-unicode.cc',
        #'harfbuzz/src/hb-uniscribe.cc',
        'harfbuzz/src/hb-warning.cc',
        # Ragel state-machine sources; turned into headers by the 'ragel'
        # rule below (outputs land in INTERMEDIATE_DIR).
        'harfbuzz/src/hb-buffer-deserialize-json.rl',
        'harfbuzz/src/hb-buffer-deserialize-text.rl',
        'harfbuzz/src/hb-ot-shape-complex-indic-machine.rl',
        'harfbuzz/src/hb-ot-shape-complex-khmer-machine.rl',
        'harfbuzz/src/hb-ot-shape-complex-myanmar-machine.rl',
        'harfbuzz/src/hb-ot-shape-complex-use-machine.rl',
      ],
      # Targets depending on harfbuzz inherit these include paths.
      'direct_dependent_settings': {
        'include_dirs': [
          'autoconf_generated',
          'harfbuzz/src',
        ],
      },
      'include_dirs': [
        'autoconf_generated',
        'harfbuzz/src',
        #'harfbuzz/src/hb-ucdn',
        '<(INTERMEDIATE_DIR)',
      ],
      'rules': [
        {
          # Run the ragel binary (built by the dependency below) on each .rl
          # source, emitting a generated .hh header into INTERMEDIATE_DIR.
          'rule_name': 'ragel',
          'extension': 'rl',
          'outputs': [
            '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).hh'
          ],
          'action': [
            '<(PRODUCT_DIR)/ragel', '-e', '-F1',
            '-o', '<@(_outputs)',
            '<(RULE_INPUT_PATH)'
          ],
        }
      ],
      'dependencies': [
        '../freetype/freetype.gyp:freetype',
        '../ragel/ragel.gyp:ragel',
        '../ucdn/ucdn.gyp:ucdn',
      ],
    },
  ]
}
| 38.24
| 66
| 0.483002
| 404
| 3,824
| 4.502475
| 0.232673
| 0.314458
| 0.357339
| 0.346344
| 0.55635
| 0.37768
| 0.298516
| 0.137438
| 0
| 0
| 0
| 0.000804
| 0.349634
| 3,824
| 99
| 67
| 38.626263
| 0.730599
| 0.078975
| 0
| 0.159091
| 0
| 0
| 0.540718
| 0.447608
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c77e06c71c9b8e58f994249c505faf5776f2b966
| 75
|
py
|
Python
|
batch_face/fast_alignment/__init__.py
|
mowshon/batch-face
|
fa5bb5903622dd9142af9b139a72b2b884b65cde
|
[
"MIT"
] | 4
|
2020-11-16T10:25:32.000Z
|
2021-11-25T09:41:37.000Z
|
batch_face/fast_alignment/__init__.py
|
mowshon/batch-face
|
fa5bb5903622dd9142af9b139a72b2b884b65cde
|
[
"MIT"
] | 3
|
2021-04-07T11:29:11.000Z
|
2022-02-28T11:34:09.000Z
|
batch_face/fast_alignment/__init__.py
|
mowshon/batch-face
|
fa5bb5903622dd9142af9b139a72b2b884b65cde
|
[
"MIT"
] | 5
|
2020-11-19T05:33:52.000Z
|
2021-10-15T14:32:30.000Z
|
from ._version import __version__
from .predictor import LandmarkPredictor
| 25
| 40
| 0.866667
| 8
| 75
| 7.5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 41
| 37.5
| 0.895522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c7af572ab82baef1dba629f150b53f37b38c70bc
| 91
|
py
|
Python
|
pandas_ml/imbaccessors/__init__.py
|
matsavage/pandas-ml
|
794cddc8dc5d0a49fbc9734d826d9465078f376e
|
[
"BSD-3-Clause"
] | 305
|
2016-02-21T06:35:25.000Z
|
2022-03-30T11:53:31.000Z
|
pandas_ml/imbaccessors/__init__.py
|
matsavage/pandas-ml
|
794cddc8dc5d0a49fbc9734d826d9465078f376e
|
[
"BSD-3-Clause"
] | 69
|
2016-02-16T08:10:46.000Z
|
2022-03-04T14:36:12.000Z
|
pandas_ml/imbaccessors/__init__.py
|
matsavage/pandas-ml
|
794cddc8dc5d0a49fbc9734d826d9465078f376e
|
[
"BSD-3-Clause"
] | 73
|
2016-02-16T08:27:28.000Z
|
2022-03-10T06:57:51.000Z
|
#!/usr/bin/env python
from pandas_ml.imbaccessors.base import ImbalanceMethods # noqa
| 22.75
| 65
| 0.769231
| 12
| 91
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 91
| 3
| 66
| 30.333333
| 0.884615
| 0.274725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c7afb0a2d05fd2b544af6b88629198886b263767
| 31,853
|
py
|
Python
|
report_generation.py
|
rohit-k-das/vulnerability-management-reporter
|
0d3f2177669ff4dc72ba1a80e825e7e8de18cc86
|
[
"MIT"
] | null | null | null |
report_generation.py
|
rohit-k-das/vulnerability-management-reporter
|
0d3f2177669ff4dc72ba1a80e825e7e8de18cc86
|
[
"MIT"
] | null | null | null |
report_generation.py
|
rohit-k-das/vulnerability-management-reporter
|
0d3f2177669ff4dc72ba1a80e825e7e8de18cc86
|
[
"MIT"
] | null | null | null |
import logging
import drive
import datetime
import sheet
from typing import List, Dict, Tuple
import ConfigParser
import os
# Module-level logger for this report generator.
logger = logging.getLogger(__name__)
# NOTE(review): 'import ConfigParser' above is the Python 2 module name, while
# the '->' annotations in this file are Python 3 syntax — confirm which
# interpreter / compatibility shim this module actually runs under.
Config = ConfigParser.ConfigParser()
# Read settings.ini from the directory containing this file.
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'settings.ini'))
# Google service-account user and Drive locations used by every upload below.
google_user_for_service_account = Config.get('Settings', 'Google_User_For_Project')
google_team_drive = Config.get('Settings', 'Gdrive_Team_Drive')
gdrive_folder_in_team_drive = Config.get('Settings', 'GDrive_Vulnerabilty_Management_Folder')
# Creates root folder that will contain vulnerability report
def check_and_create_report_root_folders() -> Tuple[str, str]:
    """Ensure the <root>/<year>/<month - year> folder chain exists in the team drive.

    Returns:
        Tuple of (team drive id, id of the '<Month> - <Year>' folder into
        which this run's reports should be written).
    """
    drive_id = drive.find_drive(google_team_drive, google_user_for_service_account)
    # Check main folder in Security Engineering
    root_folder_id = drive.find_folder(gdrive_folder_in_team_drive, google_user_for_service_account, drive_id)
    year = datetime.datetime.now().strftime('%Y')
    month_year = datetime.datetime.now().strftime('%B - %Y')
    if root_folder_id is not None:
        # Check year folder in main folder
        sub_folder_id = drive.find_folder(year, google_user_for_service_account, drive_id, root_folder_id)
        if sub_folder_id is not None:
            # Check month year folder in year sub-folder
            sub_sub_folder_id = drive.find_folder(month_year, google_user_for_service_account, drive_id, sub_folder_id)
            if sub_sub_folder_id is None:
                sub_sub_folder_id = drive.create_file(sub_folder_id, month_year, 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        else:
            sub_folder_id = drive.create_file(root_folder_id, year, 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
            sub_sub_folder_id = drive.create_file(sub_folder_id, month_year, 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
    else:
        # Fix: this call previously passed drive.google_user_for_service_account;
        # the account name is defined in this module (every other call here uses
        # the local google_user_for_service_account).
        # NOTE(review): root_folder_id is None on this branch, so create_file
        # receives None as the parent id — confirm drive.create_file treats a
        # None parent as "create at the drive root".
        root_folder_id = drive.create_file(root_folder_id, gdrive_folder_in_team_drive, 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        sub_folder_id = drive.create_file(root_folder_id, year, 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        sub_sub_folder_id = drive.create_file(sub_folder_id, month_year, 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
    return drive_id, sub_sub_folder_id
def check_and_create_report_prod_folders(drive_id: str, sub_sub_folder_id: str) -> Tuple[str, str, str]:
    """Ensure Production/{GCP, Windows, Linux} folders exist under the month folder.

    Args:
        drive_id: id of the team drive.
        sub_sub_folder_id: id of the month-year folder returned by
            check_and_create_report_root_folders().

    Returns:
        Tuple of (GCP folder id, Windows folder id, Linux folder id).
    """
    # Check Production Folder
    production_folder_id = drive.find_folder('Production', google_user_for_service_account, drive_id, sub_sub_folder_id)
    if production_folder_id is not None:
        # Check GCP Folder
        prod_gcp_folder_id = drive.find_folder('GCP', google_user_for_service_account, drive_id, production_folder_id)
        if prod_gcp_folder_id is None:
            prod_gcp_folder_id = drive.create_file(production_folder_id, 'GCP', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        # Check Windows Folder Prod
        prod_windows_folder_id = drive.find_folder('Windows', google_user_for_service_account, drive_id, production_folder_id)
        if prod_windows_folder_id is None:
            prod_windows_folder_id = drive.create_file(production_folder_id, 'Windows', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        # Check Linux Folder
        prod_linux_folder_id = drive.find_folder('Linux', google_user_for_service_account, drive_id, production_folder_id)
        if prod_linux_folder_id is None:
            prod_linux_folder_id = drive.create_file(production_folder_id, 'Linux', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
    else:
        # Production folder missing entirely: create it and all three children.
        production_folder_id = drive.create_file(sub_sub_folder_id, 'Production', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        prod_windows_folder_id = drive.create_file(production_folder_id, 'Windows', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        prod_linux_folder_id = drive.create_file(production_folder_id, 'Linux', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        prod_gcp_folder_id = drive.create_file(production_folder_id, 'GCP', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
    return prod_gcp_folder_id, prod_windows_folder_id, prod_linux_folder_id
def check_and_create_report_corp_folders(drive_id: str, sub_sub_folder_id: str) -> Tuple[str, str]:
    """Ensure Corporate/{Windows, Macs} folders exist under the month folder.

    Args:
        drive_id: id of the team drive.
        sub_sub_folder_id: id of the month-year folder returned by
            check_and_create_report_root_folders().

    Returns:
        Tuple of (Windows folder id, Macs folder id).
    """
    # Check Corporate Folder
    corporate_folder_id = drive.find_folder('Corporate', google_user_for_service_account, drive_id, sub_sub_folder_id)
    if corporate_folder_id is not None:
        # Check Windows Folder Corporate
        corp_windows_folder_id = drive.find_folder('Windows', google_user_for_service_account, drive_id, corporate_folder_id)
        if corp_windows_folder_id is None:
            corp_windows_folder_id = drive.create_file(corporate_folder_id, 'Windows', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        # Check Mac Folder Corporate
        corp_mac_folder_id = drive.find_folder('Macs', google_user_for_service_account, drive_id, corporate_folder_id)
        if corp_mac_folder_id is None:
            corp_mac_folder_id = drive.create_file(corporate_folder_id, 'Macs', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
    else:
        # Corporate folder missing entirely: create it and both children.
        corporate_folder_id = drive.create_file(sub_sub_folder_id, 'Corporate', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        corp_windows_folder_id = drive.create_file(corporate_folder_id, 'Windows', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
        corp_mac_folder_id = drive.create_file(corporate_folder_id, 'Macs', 'application/vnd.google-apps.folder', google_user_for_service_account, drive_id)
    return corp_windows_folder_id, corp_mac_folder_id
def generate_windows_package_report(report_name: str, vulnerabilities: List, folder_id: str, drive_id: str) -> None:
    """Write two spreadsheets for Windows vulnerabilities: KB patches and software updates.

    A vulnerability whose resolution starts with 'KB' goes into the
    'KB_<report_name>' sheet; everything else goes into
    'Software_Update_<report_name>'. Rows are grouped per device
    (DNS name preferred, IP as fallback).

    Args:
        report_name: base name used in both spreadsheet titles.
        vulnerabilities: vulnerability objects (resolution, plugin_name,
            dns, ip, cves, additional_links attributes are read).
        folder_id: Drive folder the spreadsheets are created in.
        drive_id: id of the team drive.
    """
    package_rows = []
    package_rows.append('Device,Criticality,Vulnerability,KB,CVE,Info'.split(','))
    package_sheet_id = drive.create_file(folder_id, 'KB_%s' % report_name, 'application/vnd.google-apps.spreadsheet', google_user_for_service_account, drive_id)
    update_rows = []
    update_rows.append('Device,Criticality,Vulnerability,Solution,CVE,Info'.split(','))
    update_sheet_id = drive.create_file(folder_id, 'Software_Update_%s' % report_name, 'application/vnd.google-apps.spreadsheet', google_user_for_service_account, drive_id)
    if package_sheet_id is not None and update_sheet_id is not None:
        criticality_of_vulnerabilities = categorize_vulnerabilities_on_criticality(vulnerabilities)
        for category in criticality_of_vulnerabilities:
            # Split this criticality bucket into KB-style patches vs. software updates.
            kb_patch_vulnerabilities = []
            update_patch_vulnerabilities = []
            for vulnerability in criticality_of_vulnerabilities[category]:
                if 'KB' in vulnerability.resolution[:3]:
                    kb_patch_vulnerabilities.append(vulnerability)
                else:
                    update_patch_vulnerabilities.append(vulnerability)
            kb_patch_devices = {}
            '''
            {
            testhost.com: {
            KB: []
            Plugin Name: []
            CVE: []
            Link: []
            Critcality: ''
            }
            }
            '''
            update_patch_device = {}
            # Group per device, skipping CentOS plugins (those belong to the
            # Linux report path).
            for vulnerability in kb_patch_vulnerabilities:
                if 'centos' not in vulnerability.plugin_name.lower():
                    if vulnerability.dns:
                        if vulnerability.dns in kb_patch_devices:
                            kb_patch_devices[vulnerability.dns].append(vulnerability)
                        else:
                            kb_patch_devices[vulnerability.dns] = [vulnerability]
                    else:
                        if vulnerability.ip in kb_patch_devices:
                            kb_patch_devices[vulnerability.ip].append(vulnerability)
                        else:
                            kb_patch_devices[vulnerability.ip] = [vulnerability]
            for vulnerability in update_patch_vulnerabilities:
                if 'centos' not in vulnerability.plugin_name.lower():
                    if vulnerability.dns:
                        if vulnerability.dns in update_patch_device:
                            update_patch_device[vulnerability.dns].append(vulnerability)
                        else:
                            update_patch_device[vulnerability.dns] = [vulnerability]
                    else:
                        if vulnerability.ip in update_patch_device:
                            update_patch_device[vulnerability.ip].append(vulnerability)
                        else:
                            update_patch_device[vulnerability.ip] = [vulnerability]
            logger.info('Found %d devices that marked as %s for KB patches' % (len(kb_patch_devices), category))
            logger.info('Found %d devices that marked as %s for updates' % (len(update_patch_device), category))
            for device in kb_patch_devices:
                KB = []
                plugins = []
                cves = []
                info_links = []
                for vulnerability in kb_patch_devices[device]:
                    KB.extend(vulnerability.resolution.split(','))
                    plugins.append(vulnerability.plugin_name)
                    if vulnerability.cves:
                        cves.extend(vulnerability.cves)
                    info_links.extend(vulnerability.additional_links)
                # Deduplicate and strip quote characters so values paste cleanly
                # into a single spreadsheet cell.
                KB = '\r'.join(list(set(KB))).replace("'", '').replace('"', '')
                plugins = '\r'.join(list(set(plugins))).replace("'", '').replace('"', '')
                cves = ', '.join(list(set(cves))).replace("'", '').replace('"', '')
                info_links = '\r'.join(list(set(info_links))).replace("'", '').replace('"', '')
                package_rows.append(("%s|%s|%s|%s|%s|%s" % (device, category, plugins, KB, cves, info_links)).split('|'))
            for device in update_patch_device:
                solution = []
                plugins = []
                cves = []
                info_links = []
                for vulnerability in update_patch_device[device]:
                    solution.append(vulnerability.resolution)
                    plugins.append(vulnerability.plugin_name)
                    if vulnerability.cves:
                        cves.extend(vulnerability.cves)
                    info_links.extend(vulnerability.additional_links)
                solution = '\r'.join(list(set(solution))).replace("'", '').replace('"', '')
                plugins = '\r'.join(list(set(plugins))).replace("'", '').replace('"', '')
                cves = ', '.join(list(set(cves))).replace("'", '').replace('"', '')
                info_links = '\r'.join(list(set(info_links))).replace("'", '').replace('"', '')
                update_rows.append(("%s|%s|%s|%s|%s|%s" % (device, category, plugins, solution, cves, info_links)).split('|'))
        sheet.update_sheet('KB_%s' % report_name, package_rows, google_user_for_service_account, package_sheet_id)
        sheet.update_sheet('Software_Update_%s' % report_name, update_rows, google_user_for_service_account, update_sheet_id)
def generate_linux_package_report(report_name: str, vulnerabilities: List, folder_id: str, drive_id: str) -> None:
    """Write one spreadsheet of Linux package vulnerabilities grouped per device.

    Args:
        report_name: spreadsheet title.
        vulnerabilities: vulnerability objects (resolution, plugin_name,
            dns, ip, cves, additional_links attributes are read).
        folder_id: Drive folder the spreadsheet is created in.
        drive_id: id of the team drive.
    """
    rows = []
    rows.append('Device,Criticality,Vulnerability,Upgrade/Update,CVE,Info'.split(','))
    sheet_id = drive.create_file(folder_id, report_name, 'application/vnd.google-apps.spreadsheet',
                                 google_user_for_service_account, drive_id)
    if sheet_id is not None:
        criticality_of_vulnerabilities = categorize_vulnerabilities_on_criticality(vulnerabilities)
        for category in criticality_of_vulnerabilities:
            devices = {}
            # Group per device: DNS name preferred, IP as fallback.
            for vulnerability in criticality_of_vulnerabilities[category]:
                if vulnerability.dns:
                    if vulnerability.dns in devices:
                        devices[vulnerability.dns].append(vulnerability)
                    else:
                        devices[vulnerability.dns] = [vulnerability]
                else:
                    if vulnerability.ip in devices:
                        devices[vulnerability.ip].append(vulnerability)
                    else:
                        devices[vulnerability.ip] = [vulnerability]
            logger.info('Found %d devices that marked as %s for linux security updates' % (len(devices), category))
            for device in devices:
                packages = []
                plugins = []
                cves = []
                info_links = []
                for vulnerability in devices[device]:
                    packages.extend(vulnerability.resolution.split(','))
                    plugins.append(vulnerability.plugin_name)
                    if vulnerability.cves:
                        cves.extend(vulnerability.cves)
                    info_links.extend(vulnerability.additional_links)
                # Deduplicate and strip quote characters before writing to a cell.
                packages = '\r'.join(list(set(packages))).replace("'", '').replace('"', '')
                plugins = '\r'.join(list(set(plugins))).replace("'", '').replace('"', '')
                cves = ', '.join(list(set(cves))).replace("'", '').replace('"', '')
                info_links = '\r'.join(list(set(info_links))).replace("'", '').replace('"', '')
                rows.append(("%s|%s|%s|%s|%s|%s" % (device, category, plugins, packages, cves, info_links)).split('|'))
        sheet.update_sheet(report_name, rows, google_user_for_service_account, sheet_id)
def generate_config_report(report_name: str, vulnerabilities: List, folder_id: str, drive_id: str) -> None:
    """Write one spreadsheet of configuration-change vulnerabilities grouped per device.

    The spreadsheet is created as 'Change_Configuration_<report_name>'.

    Args:
        report_name: base name for the spreadsheet title.
        vulnerabilities: vulnerability objects (resolution, plugin_name,
            dns, ip, cves, additional_links attributes are read).
        folder_id: Drive folder the spreadsheet is created in.
        drive_id: id of the team drive.
    """
    rows = []
    rows.append('Device,Criticality,Vulnerability,Changes,CVE,Info'.split(','))
    sheet_id = drive.create_file(folder_id, 'Change_Configuration_%s' % report_name, 'application/vnd.google-apps.spreadsheet',
                                 google_user_for_service_account, drive_id)
    if sheet_id is not None:
        criticality_of_vulnerabilities = categorize_vulnerabilities_on_criticality(vulnerabilities)
        for category in criticality_of_vulnerabilities:
            devices = {}
            # Group per device: DNS name preferred, IP as fallback.
            for vulnerability in criticality_of_vulnerabilities[category]:
                if vulnerability.dns:
                    if vulnerability.dns in devices:
                        devices[vulnerability.dns].append(vulnerability)
                    else:
                        devices[vulnerability.dns] = [vulnerability]
                else:
                    if vulnerability.ip in devices:
                        devices[vulnerability.ip].append(vulnerability)
                    else:
                        devices[vulnerability.ip] = [vulnerability]
            logger.info('Found %d devices that marked as %s for config updates' % (len(devices), category))
            for device in devices:
                changes = []
                plugins = []
                cves = []
                info_links = []
                for vulnerability in devices[device]:
                    changes.extend(vulnerability.resolution.split(','))
                    plugins.append(vulnerability.plugin_name)
                    if vulnerability.cves:
                        cves.extend(vulnerability.cves)
                    info_links.extend(vulnerability.additional_links)
                # Deduplicate and strip quote characters before writing to a cell.
                changes = '\r'.join(list(set(changes))).replace("'", '').replace('"', '')
                plugins = '\r'.join(list(set(plugins))).replace("'", '').replace('"', '')
                cves = ', '.join(list(set(cves))).replace("'", '').replace('"', '')
                info_links = '\r'.join(list(set(info_links))).replace("'", '').replace('"', '')
                rows.append(("%s|%s|%s|%s|%s|%s" % (device, category, plugins, changes, cves, info_links)).split('|'))
        # Fix: update_sheet previously received the bare report_name, which does
        # not match the 'Change_Configuration_%s' title the sheet was created
        # with (the windows report passes the full created title to update_sheet).
        sheet.update_sheet('Change_Configuration_%s' % report_name, rows, google_user_for_service_account, sheet_id)
def generate_general_report(report_name: str, vulnerabilities: List, folder_id: str, drive_id: str) -> None:
    """Write one spreadsheet grouping vulnerabilities by their shared solution.

    Each row is (devices, criticality, plugins, solution, cves, links); when
    the device list is too long for one spreadsheet cell it is split across
    multiple rows with the same solution.

    Args:
        report_name: spreadsheet title.
        vulnerabilities: vulnerability objects (resolution, plugin_name,
            dns, ip, cves, additional_links attributes are read).
        folder_id: Drive folder the spreadsheet is created in.
        drive_id: id of the team drive.
    """
    rows = []
    sheet_id = drive.create_file(folder_id, report_name, 'application/vnd.google-apps.spreadsheet', google_user_for_service_account, drive_id)
    if sheet_id is not None:
        rows.append('Device,Criticality,Vulnerability,Solution,CVE,Info'.split(','))
        criticality_of_vulnerabilities = categorize_vulnerabilities_on_criticality(vulnerabilities)
        for category in criticality_of_vulnerabilities:
            # Bucket vulnerabilities that share the exact same resolution text.
            solutions = {}
            for vulnerability in criticality_of_vulnerabilities[category]:
                if vulnerability.resolution in solutions:
                    solutions[vulnerability.resolution].append(vulnerability)
                else:
                    solutions[vulnerability.resolution] = [vulnerability]
            for solution in solutions:
                devices = []
                plugins = []
                cves = []
                info_links = []
                for vulnerability in solutions[solution]:
                    devices.append(vulnerability.dns or vulnerability.ip)
                    plugins.append(vulnerability.plugin_name)
                    if vulnerability.cves:
                        cves.extend(vulnerability.cves)
                    info_links.extend(vulnerability.additional_links)
                plugins = '\r'.join(list(set(plugins))).replace("'", '').replace('"', '')
                cves = ', '.join(list(set(cves))).replace("'", '').replace('"', '')
                info_links = '\r'.join(list(set(info_links))).replace("'", '').replace('"', '')
                devices = list(set(devices))
                # Sheet can accept a max of 50000 characters in a single cell
                if len(str(devices)) > 45000:
                    number_in_each_block_list = int(len(devices)/(len(str(devices))/45000))
                    # Bug fix: the slice must advance with i. The original
                    # devices[i:number_in_each_block_list] emitted the first
                    # chunk once and empty lists for every later i.
                    device_list = [devices[i:i + number_in_each_block_list] for i in range(0, len(devices), number_in_each_block_list)]
                    for each_list in device_list:
                        devices_cell = '\r'.join(each_list).replace("'", '').replace('"', '')
                        rows.append(("%s|%s|%s|%s|%s|%s" % (devices_cell, category, plugins, solution, cves, info_links)).split('|'))
                else:
                    devices = '\r'.join(devices).replace("'", '').replace('"', '')
                    rows.append(("%s|%s|%s|%s|%s|%s" % (devices, category, plugins, solution, cves, info_links)).split('|'))
        sheet.update_sheet(report_name, rows, google_user_for_service_account, sheet_id)
def categorize_vulnerabilities_on_criticality(vulnerabilities: List) -> Dict[str, List]:
    """Bucket vulnerabilities into 'High', 'Medium' and 'Low' lists.

    A vulnerability's ``actual_criticality`` takes precedence; when it is not
    one of the three known levels, ``nessus_criticiality`` is consulted
    instead. Vulnerabilities matching neither are silently dropped.

    Args:
        vulnerabilities: objects exposing ``actual_criticality`` and
            ``nessus_criticiality`` attributes.

    Returns:
        Dict mapping each of the three levels to its vulnerability list
        (always contains all three keys, possibly with empty lists).
    """
    buckets = {'High': [], 'Medium': [], 'Low': []}
    for vuln in vulnerabilities:
        level = vuln.actual_criticality
        if level not in buckets:
            # Fall back to the scanner-reported level.
            level = vuln.nessus_criticiality
        if level in buckets:
            buckets[level].append(vuln)
    return buckets
def divide_linux_vulnerabilities_based_on_os_version(vulnerabilities: List) -> Dict[str, List]:
    """Group Linux vulnerabilities by their reported OS flavor.

    Vulnerabilities whose ``os`` attribute is falsy land in a generic
    'Linux' bucket, which is always present (possibly empty) in the result.

    Args:
        vulnerabilities: objects exposing an ``os`` attribute.

    Returns:
        Dict mapping OS flavor (or 'Linux') to the vulnerabilities for it.
    """
    # Pre-create one empty bucket per distinct truthy OS value, plus 'Linux'.
    grouped = {flavor: [] for flavor in {vuln.os for vuln in vulnerabilities} if flavor}
    grouped['Linux'] = []
    for vuln in vulnerabilities:
        grouped[vuln.os if vuln.os else 'Linux'].append(vuln)
    return grouped
def generate_prod_reports(vulnerabilities: List, gcp: bool = False) -> None:
    """Generate High/Medium/Low vulnerability spreadsheets for production hosts.

    Splits the input by platform (Windows/Linux) and vulnerability type
    (package/config), then writes one report per combination into the
    Production/Windows and Production/Linux folders, or — when ``gcp`` is
    True — into the Production/GCP folder. Consumes (clears) the input list.

    Args:
        vulnerabilities: vulnerability objects (platform and
            vulnerability_type attributes are read here). Cleared in place.
        gcp: when True, all reports go to the GCP folder instead.
    """
    # Prime a Drive API access token for the service account.
    drive.drive_access_tokens[google_user_for_service_account] = {}
    access_token, expiry = drive.generate_drive_api_access_token(google_user_for_service_account)
    if access_token is not None and expiry is not None:
        drive.drive_access_tokens[google_user_for_service_account]['access_token'] = access_token
        drive.drive_access_tokens[google_user_for_service_account]['expiry'] = expiry
    drive_id, folder_id = check_and_create_report_root_folders()
    if drive_id is not None and folder_id is not None:
        prod_gcp_folder_id, prod_windows_folder_id, prod_linux_folder_id = check_and_create_report_prod_folders(drive_id, folder_id)
    else:
        # NOTE(review): bare exit() rather than sys.exit() — works in a normal
        # interpreter via the site module, but confirm for frozen deployments.
        logger.info('Unable to find drive id and folder id')
        exit(-1)
    # Prime a Sheets API access token as well.
    sheet.sheet_access_tokens[google_user_for_service_account] = {}
    access_token, expiry = sheet.generate_sheet_api_access_token(google_user_for_service_account)
    if access_token is not None and expiry is not None:
        sheet.sheet_access_tokens[google_user_for_service_account]['access_token'] = access_token
        sheet.sheet_access_tokens[google_user_for_service_account]['expiry'] = expiry
    server_windows_package_vulnerabilities = []
    server_windows_config_vulnerabilities = []
    server_linux_package_vulnerabilities = []
    server_linux_config_vulnerabilities = []
    logger.info('Dividing vulnerabilities into packages and configs')
    for vulnerability in vulnerabilities:
        if vulnerability.platform == 'Windows' and vulnerability.vulnerability_type == 'package':
            server_windows_package_vulnerabilities.append(vulnerability)
        elif vulnerability.platform == 'Windows' and vulnerability.vulnerability_type == 'config':
            server_windows_config_vulnerabilities.append(vulnerability)
        elif vulnerability.platform == 'Linux' and vulnerability.vulnerability_type == 'package':
            server_linux_package_vulnerabilities.append(vulnerability)
        elif vulnerability.platform == 'Linux' and vulnerability.vulnerability_type == 'config':
            server_linux_config_vulnerabilities.append(vulnerability)
    vulnerabilities.clear()
    logger.info('Dividing Linux Vulnerabilities based on OS')
    server_linux_package_vulnerabilities_based_on_os = divide_linux_vulnerabilities_based_on_os_version(server_linux_package_vulnerabilities)
    server_linux_config_vulnerabilities_based_on_os = divide_linux_vulnerabilities_based_on_os_version(server_linux_config_vulnerabilities)
    server_linux_package_vulnerabilities.clear()
    server_linux_config_vulnerabilities.clear()
    if not gcp:
        # Windows Packages
        logger.info('Generating reports for High, Medium & Low vulnerabilities for windows packages across dev, qa, test and prod')
        if server_windows_package_vulnerabilities and prod_windows_folder_id is not None:
            generate_windows_package_report('windows_package_%s' % datetime.date.today().isoformat().replace('-', ''), server_windows_package_vulnerabilities, prod_windows_folder_id, drive_id)
        server_windows_package_vulnerabilities.clear()
        # Windows Config
        logger.info('Generating reports for High, Medium & Low vulnerabilities for windows config across dev, qa, test and prod')
        if server_windows_config_vulnerabilities and prod_windows_folder_id is not None:
            generate_general_report('Windows_config_%s' % datetime.date.today().isoformat().replace('-', ''), server_windows_config_vulnerabilities, prod_windows_folder_id, drive_id)
        server_windows_config_vulnerabilities.clear()
        # Linux Packages
        logger.info('Generating reports for High, Medium & Low vulnerabilities for linux packages across dev, qa, test and prod')
        for os in server_linux_package_vulnerabilities_based_on_os:
            if server_linux_package_vulnerabilities_based_on_os[os] and prod_linux_folder_id is not None:
                generate_linux_package_report('Linux_package_%s_%s' % (os, datetime.date.today().isoformat().replace('-', '')), server_linux_package_vulnerabilities_based_on_os[os], prod_linux_folder_id, drive_id)
        server_linux_package_vulnerabilities_based_on_os.clear()
        # Linux Config
        logger.info('Generating reports for High, Medium & Low vulnerabilities for linux config across dev, qa, test and prod')
        for os in server_linux_config_vulnerabilities_based_on_os:
            if server_linux_config_vulnerabilities_based_on_os[os] and prod_linux_folder_id is not None:
                generate_general_report('Linux_config_%s_%s' % (os, datetime.date.today().isoformat().replace('-', '')), server_linux_config_vulnerabilities_based_on_os[os], prod_linux_folder_id, drive_id)
        server_linux_config_vulnerabilities_based_on_os.clear()
        #drive.assign_permission(prod_windows_folder_id, email, recipient)
    else:
        # GCP Windows Packages
        logger.info('Generating reports for High, Medium & Low vulnerabilities for GCP windows packages across dev, qa, test and prod')
        if server_windows_package_vulnerabilities and prod_gcp_folder_id is not None:
            generate_windows_package_report('windows_package_%s' % datetime.date.today().isoformat().replace('-', ''), server_windows_package_vulnerabilities, prod_gcp_folder_id, drive_id)
        server_windows_package_vulnerabilities.clear()
        # GCP Windows Config
        logger.info('Generating reports for High, Medium & Low vulnerabilities for GCP windows config across dev, qa, test and prod')
        if server_windows_config_vulnerabilities and prod_gcp_folder_id is not None:
            generate_general_report('Windows_config_%s' % datetime.date.today().isoformat().replace('-', ''), server_windows_config_vulnerabilities, prod_gcp_folder_id, drive_id)
        server_windows_config_vulnerabilities.clear()
        # GCP Linux Packages
        logger.info('Generating reports for High, Medium & Low vulnerabilities for GCP linux packages across dev, qa, test and prod')
        for os in server_linux_package_vulnerabilities_based_on_os:
            if server_linux_package_vulnerabilities_based_on_os[os] and prod_gcp_folder_id is not None:
                generate_linux_package_report('Linux_package_%s_%s' % (os, datetime.date.today().isoformat().replace('-', '')),
                                              server_linux_package_vulnerabilities_based_on_os[os], prod_gcp_folder_id, drive_id)
        server_linux_package_vulnerabilities_based_on_os.clear()
        # GCP Linux Config
        logger.info('Generating reports for High, Medium & Low vulnerabilities for GCP linux config across dev, qa, test and prod')
        for os in server_linux_config_vulnerabilities_based_on_os:
            if server_linux_config_vulnerabilities_based_on_os[os] and prod_gcp_folder_id is not None:
                generate_general_report('Linux_config_%s_%s' % (os,datetime.date.today().isoformat().replace('-', '')),
                                        server_linux_config_vulnerabilities_based_on_os[os], prod_gcp_folder_id, drive_id)
        server_linux_config_vulnerabilities_based_on_os.clear()
def generate_corp_reports(vulnerabilities: List) -> None:
    """Generate vulnerability reports for the Corp environment.

    Splits *vulnerabilities* by platform (Windows / Mac) and type
    (package / config), then writes one Drive report per non-empty
    bucket into the corp Windows / Mac folders.  Clears the input list
    and each bucket once consumed.  Exits the process when the report
    root folders cannot be resolved.
    """
    # Refresh the Drive API access token for the service account.
    drive.drive_access_tokens[google_user_for_service_account] = {}
    access_token, expiry = drive.generate_drive_api_access_token(google_user_for_service_account)
    if access_token is not None and expiry is not None:
        drive.drive_access_tokens[google_user_for_service_account]['access_token'] = access_token
        drive.drive_access_tokens[google_user_for_service_account]['expiry'] = expiry
    drive_id, folder_id = check_and_create_report_root_folders()
    if drive_id is not None and folder_id is not None:
        corp_windows_folder_id, corp_mac_folder_id = check_and_create_report_corp_folders(drive_id, folder_id)
    else:
        logger.info('Unable to find drive id and folder id')
        exit(-1)
    # Refresh the Sheets API access token for the service account.
    sheet.sheet_access_tokens[google_user_for_service_account] = {}
    access_token, expiry = sheet.generate_sheet_api_access_token(google_user_for_service_account)
    if access_token is not None and expiry is not None:
        sheet.sheet_access_tokens[google_user_for_service_account]['access_token'] = access_token
        sheet.sheet_access_tokens[google_user_for_service_account]['expiry'] = expiry
    windows_package_vulnerabilities = []
    windows_config_vulnerabilities = []
    mac_package_vulnerabilities = []
    mac_config_vulnerabilities = []
    # NOTE(review): the original message said "dev, qa, test and prod" —
    # a copy-paste from the server report path; corrected for corp.
    logger.info('Dividing vulnerabilities by platform and vulnerability type')
    # elif keeps the four buckets mutually exclusive and consistent with
    # the Windows branch above (a vulnerability has one platform/type).
    for vulnerability in vulnerabilities:
        if vulnerability.platform == 'Windows' and vulnerability.vulnerability_type == 'package':
            windows_package_vulnerabilities.append(vulnerability)
        elif vulnerability.platform == 'Windows' and vulnerability.vulnerability_type == 'config':
            windows_config_vulnerabilities.append(vulnerability)
        elif vulnerability.platform == 'Mac' and vulnerability.vulnerability_type == 'package':
            mac_package_vulnerabilities.append(vulnerability)
        elif vulnerability.platform == 'Mac' and vulnerability.vulnerability_type == 'config':
            mac_config_vulnerabilities.append(vulnerability)
    vulnerabilities.clear()
    # Compute the YYYYMMDD suffix once instead of re-deriving it per report.
    date_suffix = datetime.date.today().isoformat().replace('-', '')
    # Windows Packages
    logger.info('Generating reports for High, Medium & Low vulnerabilities for Corp windows packages corp environment')
    if windows_package_vulnerabilities and corp_windows_folder_id is not None:
        generate_windows_package_report('windows_package_%s' % date_suffix, windows_package_vulnerabilities, corp_windows_folder_id, drive_id)
        windows_package_vulnerabilities.clear()
    # Windows Config
    logger.info('Generating reports for High, Medium & Low vulnerabilities for Corp windows config corp environment')
    if windows_config_vulnerabilities and corp_windows_folder_id is not None:
        generate_general_report('Windows_config_%s' % date_suffix, windows_config_vulnerabilities, corp_windows_folder_id, drive_id)
        windows_config_vulnerabilities.clear()
    # Mac Packages
    logger.info('Generating reports for High, Medium & Low vulnerabilities for Corp mac packages corp environment')
    if mac_package_vulnerabilities and corp_mac_folder_id is not None:
        generate_linux_package_report('Mac_package_%s' % date_suffix, mac_package_vulnerabilities, corp_mac_folder_id, drive_id)
        mac_package_vulnerabilities.clear()
    # Mac Config
    logger.info('Generating reports for High, Medium & Low vulnerabilities for Corp mac config corp environment')
    if mac_config_vulnerabilities and corp_mac_folder_id is not None:
        generate_general_report('Mac_config_%s' % date_suffix, mac_config_vulnerabilities, corp_mac_folder_id, drive_id)
        mac_config_vulnerabilities.clear()
| 58.232176
| 213
| 0.680815
| 3,703
| 31,853
| 5.508237
| 0.05293
| 0.047066
| 0.036329
| 0.05491
| 0.849439
| 0.806295
| 0.768201
| 0.723293
| 0.677158
| 0.630975
| 0
| 0.000767
| 0.221832
| 31,853
| 546
| 214
| 58.338828
| 0.822125
| 0.02072
| 0
| 0.466346
| 0
| 0.009615
| 0.120194
| 0.036831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026442
| false
| 0
| 0.016827
| 0
| 0.055288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c7f4e4d4d4fb5516d40f6ce6d63f7fb0a55a8a78
| 111
|
py
|
Python
|
Data_Conversion/Kfunc/__init__.py
|
simay1224/K-project-UI
|
c69f83b6446052a1cd32a00700e7db197f36a1ed
|
[
"Apache-2.0"
] | null | null | null |
Data_Conversion/Kfunc/__init__.py
|
simay1224/K-project-UI
|
c69f83b6446052a1cd32a00700e7db197f36a1ed
|
[
"Apache-2.0"
] | 1
|
2018-06-19T22:21:43.000Z
|
2018-06-19T22:21:43.000Z
|
Data_Conversion/Kfunc/__init__.py
|
simay1224/K-project-UI
|
c69f83b6446052a1cd32a00700e7db197f36a1ed
|
[
"Apache-2.0"
] | 3
|
2018-08-29T18:39:57.000Z
|
2020-06-05T15:29:07.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 17:09:56 2016
@author: medialab
"""
from model import *
| 11.1
| 35
| 0.594595
| 17
| 111
| 3.882353
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151163
| 0.225225
| 111
| 9
| 36
| 12.333333
| 0.616279
| 0.693694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1bf68078a26440e3f5260b505f96ef272240aaf8
| 95
|
py
|
Python
|
Class 12 Record Book/Length.py
|
Bamgm14/My-Random-Work
|
b9678a3a84dd8ff00efd638890cff76eb6967c1b
|
[
"MIT"
] | null | null | null |
Class 12 Record Book/Length.py
|
Bamgm14/My-Random-Work
|
b9678a3a84dd8ff00efd638890cff76eb6967c1b
|
[
"MIT"
] | null | null | null |
Class 12 Record Book/Length.py
|
Bamgm14/My-Random-Work
|
b9678a3a84dd8ff00efd638890cff76eb6967c1b
|
[
"MIT"
] | null | null | null |
def Length(file='Weird.txt'):
    """Return the number of lines in *file*.

    Fixes the original's resource leak: the handle was opened without
    ever being closed.  Also counts lazily instead of materialising the
    whole file with readlines().
    """
    with open(file, 'r') as fh:
        return sum(1 for _ in fh)
# Script entry point: print the line count of the default file ('Weird.txt').
print(Length())
| 23.75
| 48
| 0.663158
| 14
| 95
| 4.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094737
| 95
| 3
| 49
| 31.666667
| 0.732558
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4043fd507df5bdbc5820b08b225176806f2983d1
| 1,836
|
py
|
Python
|
stonesoup/types/array.py
|
dlast-dstl/Stone-Soup
|
033254add5adc00097b746f81d6640308a3e3319
|
[
"MIT"
] | 1
|
2021-04-13T11:47:42.000Z
|
2021-04-13T11:47:42.000Z
|
stonesoup/types/array.py
|
dlast-dstl/Stone-Soup
|
033254add5adc00097b746f81d6640308a3e3319
|
[
"MIT"
] | null | null | null |
stonesoup/types/array.py
|
dlast-dstl/Stone-Soup
|
033254add5adc00097b746f81d6640308a3e3319
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
class StateVector(np.ndarray):
    """State vector wrapper for :class:`numpy.ndarray`.

    Returns a view onto a :class:`numpy.ndarray`, but enforces that the
    input is an *Nx1* (column) vector.  Called the same way as
    :func:`numpy.asarray`.
    """

    def __new__(cls, *args, **kwargs):
        array = np.asarray(*args, **kwargs)
        # Enforce the Nx1 column-vector contract up front.
        if not (array.ndim == 2 and array.shape[1] == 1):
            raise ValueError(
                "state vector shape should be Nx1 dimensions: got {}".format(
                    array.shape))
        return array.view(cls)

    def __array_wrap__(self, array):
        # Ufunc results degrade to plain ndarray (the Nx1 contract may no
        # longer hold for the result).
        return np.asarray(array)

    def __matmul__(self, other):
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
        # is the exact documented equivalent (float64 conversion).
        out = np.matmul(np.asarray(self, dtype=float), np.asarray(other, dtype=float))
        return out.view(type=type(self))

    def __rmatmul__(self, other):
        out = np.matmul(np.asarray(other, dtype=float), np.asarray(self, dtype=float))
        return out.view(type=type(other))
class CovarianceMatrix(np.ndarray):
    """Covariance matrix wrapper for :class:`numpy.ndarray`.

    Returns a view onto a :class:`numpy.ndarray`, but enforces that the
    input is a 2-D matrix.  Called similarly to :func:`numpy.asarray`.
    """

    def __new__(cls, *args, **kwargs):
        array = np.asarray(*args, **kwargs)
        # Only dimensionality is validated here (not squareness).
        if not array.ndim == 2:
            raise ValueError("Covariance should have ndim of 2: got {}"
                             "".format(array.ndim))
        return array.view(cls)

    def __array_wrap__(self, array):
        # Ufunc results degrade to plain ndarray.
        return np.asarray(array)

    def __matmul__(self, other):
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
        # is the exact documented equivalent (float64 conversion).
        out = np.matmul(self, np.asarray(other, dtype=float))
        return out.view(type=type(self))

    def __rmatmul__(self, other):
        out = np.matmul(np.asarray(other, dtype=float), self)
        return out.view(type=type(other))
| 31.655172
| 77
| 0.610022
| 242
| 1,836
| 4.487603
| 0.280992
| 0.055249
| 0.062615
| 0.051565
| 0.712707
| 0.712707
| 0.712707
| 0.64825
| 0.64825
| 0.64825
| 0
| 0.005878
| 0.258715
| 1,836
| 57
| 78
| 32.210526
| 0.792065
| 0.235294
| 0
| 0.5625
| 0
| 0
| 0.066961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.03125
| 0.0625
| 0.59375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
40517258bb4832a077d5c1ffbc13d2bd7db0e5b6
| 2,508
|
py
|
Python
|
bannerpunk/print.py
|
jarret/bannerpunk
|
f51d29fadf72c04b0b66adf8f5674328bff9f599
|
[
"MIT"
] | 3
|
2019-12-09T02:03:57.000Z
|
2019-12-29T03:42:18.000Z
|
bannerpunk/print.py
|
jarret/bannerpunk
|
f51d29fadf72c04b0b66adf8f5674328bff9f599
|
[
"MIT"
] | null | null | null |
bannerpunk/print.py
|
jarret/bannerpunk
|
f51d29fadf72c04b0b66adf8f5674328bff9f599
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018 PrimeVR
# All rights Reserved
###############################################################################
# print helpers
###############################################################################
# ANSI SGR escape sequences of the form '\x1b[<style>;<fg>;<bg>m'.
# Style 0 = normal ("CHILL_"), 1 = bold/bright, 5 = blink; foreground codes
# are 3x, background codes 4x (40 = black, 44 = blue).
CHILL_WHITE = '\x1b[0;37;40m'
CHILL_PURPLE = '\x1b[0;35;40m'
CHILL_LIGHT_BLUE = '\x1b[0;36;40m'
CHILL_BLUE = '\x1b[0;34;40m'
MEGA_WHITE = '\x1b[1;37;40m'
LIGHT_BLUE = '\x1b[1;36;40m'
BLUE = '\x1b[1;34;40m'
GREEN = '\x1b[1;32;40m'
CHILL_GREEN = '\x1b[0;32;40m'
RED = '\x1b[1;31;40m'
YELLOW = '\x1b[1;33;40m'
CHILL_YELLOW = '\x1b[0;33;40m'
FANCY_BLUE = '\x1b[1;37;44m'  # white on blue background
ANNOYING = '\x1b[5;31;44m'  # blinking red on blue
ENDC = '\x1b[0m'  # reset all attributes
def print_red(string):
    """Print *string* in bright red."""
    print("".join((RED, string, ENDC)))
def print_green(string):
    """Print *string* in bright green."""
    print("".join((GREEN, string, ENDC)))
def print_chill_green(string):
    """Print *string* in normal-weight green."""
    print("".join((CHILL_GREEN, string, ENDC)))
def print_light_blue(string):
    """Print *string* in bright cyan."""
    print("".join((LIGHT_BLUE, string, ENDC)))
def print_fancy_blue(string):
    """Print *string* in white on a blue background."""
    print("".join((FANCY_BLUE, string, ENDC)))
def print_blue(string):
    """Print *string* in bright blue."""
    print("".join((BLUE, string, ENDC)))
def print_yellow(string):
    """Print *string* in bright yellow."""
    print("".join((YELLOW, string, ENDC)))
def print_chill_yellow(string):
    """Print *string* in normal-weight yellow."""
    print("".join((CHILL_YELLOW, string, ENDC)))
def print_chill_white(string):
    """Print *string* in normal-weight white."""
    print("".join((CHILL_WHITE, string, ENDC)))
def print_chill_purple(string):
    """Print *string* in normal-weight purple."""
    print("".join((CHILL_PURPLE, string, ENDC)))
def print_chill_light_blue(string):
    """Print *string* in normal-weight cyan."""
    print("".join((CHILL_LIGHT_BLUE, string, ENDC)))
def print_chill_blue(string):
    """Print *string* in normal-weight blue."""
    print("".join((CHILL_BLUE, string, ENDC)))
def print_mega_white(string):
    """Print *string* in bold white."""
    print("".join((MEGA_WHITE, string, ENDC)))
def print_annoying(string):
    """Print *string* blinking on a blue background."""
    print("".join((ANNOYING, string, ENDC)))
##################################################################
def red_str(string):
    """Return *string* wrapped in bright-red ANSI codes."""
    return "".join((RED, string, ENDC))
def chill_green_str(string):
    """Return *string* wrapped in normal-green ANSI codes."""
    return "".join((CHILL_GREEN, string, ENDC))
def light_blue_str(string):
    """Return *string* wrapped in bright-cyan ANSI codes."""
    return "".join((LIGHT_BLUE, string, ENDC))
def fancy_blue_str(string):
    """Return *string* wrapped in white-on-blue ANSI codes."""
    return "".join((FANCY_BLUE, string, ENDC))
def blue_str(string):
    """Return *string* wrapped in bright-blue ANSI codes."""
    return "".join((BLUE, string, ENDC))
def yellow_str(string):
    """Return *string* wrapped in bright-yellow ANSI codes."""
    return "".join((YELLOW, string, ENDC))
def chill_yellow_str(string):
    """Return *string* wrapped in normal-yellow ANSI codes."""
    return "".join((CHILL_YELLOW, string, ENDC))
def chill_white_str(string):
    """Return *string* wrapped in normal-white ANSI codes."""
    return "".join((CHILL_WHITE, string, ENDC))
def chill_purple_str(string):
    """Return *string* wrapped in normal-purple ANSI codes."""
    return "".join((CHILL_PURPLE, string, ENDC))
def chill_light_blue_str(string):
    """Return *string* wrapped in normal-cyan ANSI codes."""
    return "".join((CHILL_LIGHT_BLUE, string, ENDC))
def chill_blue_str(string):
    """Return *string* wrapped in normal-blue ANSI codes."""
    return "".join((CHILL_BLUE, string, ENDC))
def mega_white_str(string):
    """Return *string* wrapped in bold-white ANSI codes."""
    return "".join((MEGA_WHITE, string, ENDC))
def annoying_str(string):
    """Return *string* wrapped in blinking ANSI codes."""
    return "".join((ANNOYING, string, ENDC))
| 23.660377
| 79
| 0.642743
| 348
| 2,508
| 4.41092
| 0.112069
| 0.175896
| 0.220195
| 0.152443
| 0.441694
| 0.093811
| 0
| 0
| 0
| 0
| 0
| 0.042715
| 0.159888
| 2,508
| 105
| 80
| 23.885714
| 0.685809
| 0.023923
| 0
| 0
| 0
| 0
| 0.085135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.391304
| false
| 0
| 0
| 0.188406
| 0.57971
| 0.405797
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
|
0
| 5
|
40517478ae6e37ed546d663cd3159a7be1cafc5d
| 107
|
py
|
Python
|
slybot/slybot/starturls/__init__.py
|
bowlofstew/portia
|
41aaf2ef1a3ac75aeda363b6a5b67bf21b1afd4c
|
[
"BSD-3-Clause"
] | 1
|
2017-11-03T13:00:21.000Z
|
2017-11-03T13:00:21.000Z
|
slybot/slybot/starturls/__init__.py
|
Save22/portia
|
961d2c87b99d99fbdc17aa932ec897bdbcd54d79
|
[
"BSD-3-Clause"
] | 2
|
2021-03-31T20:04:55.000Z
|
2021-12-13T20:47:09.000Z
|
slybot/slybot/starturls/__init__.py
|
bowlofstew/portia
|
41aaf2ef1a3ac75aeda363b6a5b67bf21b1afd4c
|
[
"BSD-3-Clause"
] | 2
|
2017-11-03T13:00:23.000Z
|
2020-08-28T19:59:40.000Z
|
from .generator import UrlGenerator
class StartUrls:
    """Callable start-URL processor that passes the spec through unchanged."""

    def __call__(self, spec):
        # Identity behaviour: the given spec is returned as-is.
        return spec
| 15.285714
| 35
| 0.691589
| 12
| 107
| 5.833333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233645
| 107
| 6
| 36
| 17.833333
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
409f30d07086ca905cdec06fd445e125530e6735
| 1,564
|
py
|
Python
|
guillotina/utils/__init__.py
|
karannaoh/guillotina
|
dbc04f142734c465a04cb94ef6801ba819e63c76
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/utils/__init__.py
|
karannaoh/guillotina
|
dbc04f142734c465a04cb94ef6801ba819e63c76
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/utils/__init__.py
|
karannaoh/guillotina
|
dbc04f142734c465a04cb94ef6801ba819e63c76
|
[
"BSD-2-Clause"
] | null | null | null |
from .auth import get_authenticated_user # noqa
from .auth import get_authenticated_user_id # noqa
from .content import get_behavior # noqa
from .content import get_containers # noqa
from .content import get_content_depth # noqa
from .content import get_content_path # noqa
from .content import get_object_by_oid # noqa
from .content import get_object_url # noqa
from .content import get_owners # noqa
from .content import iter_databases # noqa
from .content import iter_parents # noqa
from .content import navigate_to # noqa
from .content import valid_id # noqa
from .crypto import get_jwk_key # noqa
from .crypto import secure_passphrase # noqa
from .misc import apply_coroutine # noqa
from .misc import get_current_request # noqa
from .misc import get_random_string # noqa
from .misc import get_url # noqa
from .misc import lazy_apply # noqa
from .misc import list_or_dict_items # noqa
from .misc import loop_apply_coroutine # noqa
from .misc import merge_dicts # noqa
from .misc import run_async # noqa
from .misc import safe_unidecode # noqa
from .misc import strings_differ # noqa
from .misc import to_str # noqa
from .modules import get_caller_module # noqa
from .modules import get_class_dotted_name # noqa
from .modules import get_dotted_name # noqa
from .modules import get_module_dotted_name # noqa
from .modules import import_class # noqa
from .modules import resolve_dotted_name # noqa
from .modules import resolve_module_path # noqa
from .modules import resolve_path # noqa
from .navigator import Navigator # noqa
| 42.27027
| 51
| 0.792839
| 237
| 1,564
| 4.991561
| 0.248945
| 0.236686
| 0.121724
| 0.182587
| 0.559594
| 0.324598
| 0.057481
| 0
| 0
| 0
| 0
| 0
| 0.161125
| 1,564
| 36
| 52
| 43.444444
| 0.901677
| 0.11445
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.027778
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
40ba7aab465979e7dcec90e2c60e8d488a3753b6
| 24,098
|
py
|
Python
|
tests/test_report_writer.py
|
lemoncheesecake/lemoncheesecake
|
bc92cb8225d74e2687ed5825ee5af3f56f907829
|
[
"Apache-2.0",
"MIT"
] | 34
|
2017-06-12T18:50:36.000Z
|
2021-11-29T01:59:07.000Z
|
tests/test_report_writer.py
|
lemoncheesecake/lemoncheesecake
|
bc92cb8225d74e2687ed5825ee5af3f56f907829
|
[
"Apache-2.0",
"MIT"
] | 25
|
2017-12-07T13:35:29.000Z
|
2022-03-10T01:27:58.000Z
|
tests/test_report_writer.py
|
lemoncheesecake/lemoncheesecake
|
bc92cb8225d74e2687ed5825ee5af3f56f907829
|
[
"Apache-2.0",
"MIT"
] | 4
|
2019-05-05T03:19:00.000Z
|
2021-10-06T13:12:05.000Z
|
# -*- coding: utf-8 -*-
'''
Created on Nov 1, 2016
@author: nicolas
'''
import os.path as osp
import time
import pytest
import six
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import *
from helpers.runner import run_suite_class, run_suite_classes, run_func_in_test
from helpers.report import assert_report_from_suite, assert_report_from_suites, get_last_test, get_last_attachment, \
assert_attachment
# Reference PNG from the documentation tree, loaded once at import time and
# reused by the attachment tests as known binary content.
SAMPLE_IMAGE_PATH = osp.join(osp.dirname(__file__), osp.pardir, "doc", "_static", "report-sample.png")
with open(SAMPLE_IMAGE_PATH, "rb") as fh:
    SAMPLE_IMAGE_CONTENT = fh.read()
def _get_suite(report, suite_path=None):
return report.get_suite(suite_path) if suite_path else report.get_suites()[0]
def _get_suite_setup(report, suite_path=None):
    """Return the suite_setup result of the selected suite."""
    return _get_suite(report, suite_path).suite_setup
def _get_suite_teardown(report, suite_path=None):
    """Return the suite_teardown result of the selected suite."""
    return _get_suite(report, suite_path).suite_teardown
def make_file_reader(encoding=None, binary=False):
    """Build a ``reader(path)`` callable returning the file's content.

    *binary* selects ``"rb"`` mode; under Python 2 (``six.PY2``) the read
    content is additionally decoded with *encoding* when one is given.
    """
    mode = "rb" if binary else "r"
    def reader(path):
        with open(path, mode) as fh:
            data = fh.read()
        if encoding and six.PY2:
            data = data.decode(encoding)
        return data
    return reader
# A single suite with one empty test must round-trip through the report
# (structure is verified by assert_report_from_suite).
def test_simple_test():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            pass
    report = run_suite_class(mysuite)
    assert_report_from_suite(report, mysuite)
# Same round-trip with every kind of test metadata (link, property, tags).
def test_test_with_all_metadata():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.link("http://foo.bar", "foobar")
        @lcc.prop("foo", "bar")
        @lcc.tags("foo", "bar")
        @lcc.test("Some test")
        def sometest(self):
            pass
    report = run_suite_class(mysuite)
    assert_report_from_suite(report, mysuite)
# Same round-trip with the metadata attached to the suite instead of the test.
def test_suite_with_all_metadata():
    @lcc.link("http://foo.bar", "foobar")
    @lcc.prop("foo", "bar")
    @lcc.tags("foo", "bar")
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            pass
    report = run_suite_class(mysuite)
    assert_report_from_suite(report, mysuite)
# Multiple suites (including a nested sub-suite) with several tests each.
def test_multiple_suites_and_tests():
    @lcc.suite("MySuite1")
    class mysuite1:
        @lcc.tags("foo")
        @lcc.test("Some test 1")
        def test_1_1(self):
            pass
        @lcc.tags("bar")
        @lcc.test("Some test 2")
        def test_1_2(self):
            pass
        @lcc.tags("baz")
        @lcc.test("Some test 3")
        def test_1_3(self):
            pass
    @lcc.suite("MySuite2")
    class mysuite2:
        @lcc.prop("foo", "bar")
        @lcc.test("Some test 1")
        def test_2_1(self):
            pass
        @lcc.prop("foo", "baz")
        @lcc.test("Some test 2")
        def test_2_2(self):
            pass
        @lcc.test("Some test 3")
        def test_2_3(self):
            pass
        # suite3 is a sub suite of suite2
        @lcc.suite("MySuite3")
        class mysuite3:
            @lcc.prop("foo", "bar")
            @lcc.test("Some test 1")
            def test_3_1(self):
                pass
            @lcc.prop("foo", "baz")
            @lcc.test("Some test 2")
            def test_3_2(self):
                pass
            @lcc.test("Some test 3")
            def test_3_3(self):
                pass
    report = run_suite_classes([mysuite1, mysuite2])
    assert_report_from_suites(report, [mysuite1, mysuite2])
# check_that() success: the test passes and the step log records the checked
# name, the expected value, the success flag and the actual-value details.
def test_check_success():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Test 1")
        def test_1(self):
            check_that("somevalue", "foo", equal_to("foo"))
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    step = test.get_steps()[0]
    assert "somevalue" in step.get_logs()[0].description
    assert "foo" in step.get_logs()[0].description
    assert step.get_logs()[0].is_successful is True
    assert "foo" in step.get_logs()[0].details
# check_that() failure: the test fails and the log flags the check as
# unsuccessful (details still carry the actual value).
def test_check_failure():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Test 1")
        def test_1(self):
            check_that("somevalue", "foo", equal_to("bar"))
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "failed"
    step = test.get_steps()[0]
    assert "somevalue" in step.get_logs()[0].description
    assert "bar" in step.get_logs()[0].description
    assert step.get_logs()[0].is_successful is False
    assert "foo" in step.get_logs()[0].details
# require_that() success mirrors the check_that() success case.
def test_require_success():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Test 1")
        def test_1(self):
            require_that("somevalue", "foo", equal_to("foo"))
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    step = test.get_steps()[0]
    assert "somevalue" in step.get_logs()[0].description
    assert "foo" in step.get_logs()[0].description
    assert step.get_logs()[0].is_successful is True
    assert "foo" in step.get_logs()[0].details
# require_that() failure mirrors the check_that() failure case.
def test_require_failure():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Test 1")
        def test_1(self):
            require_that("somevalue", "foo", equal_to("bar"))
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "failed"
    step = test.get_steps()[0]
    assert "somevalue" in step.get_logs()[0].description
    assert "bar" in step.get_logs()[0].description
    assert step.get_logs()[0].is_successful is False
    assert "foo" in step.get_logs()[0].details
# Each log level (debug/info/warning/error) is recorded with the expected
# level string; a test that logs an error is reported as failed.
def test_all_types_of_logs():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Test 1")
        def test_1(self):
            lcc.log_debug("some debug message")
            lcc.log_info("some info message")
            lcc.log_warning("some warning message")
        @lcc.test("Test 2")
        def test_2(self):
            lcc.log_error("some error message")
    report = run_suite_class(mysuite)
    test = report.get_test("mysuite.test_1")
    assert test.status == "passed"
    step = test.get_steps()[0]
    assert step.get_logs()[0].level == "debug"
    assert step.get_logs()[0].message == "some debug message"
    assert step.get_logs()[1].level == "info"
    assert step.get_logs()[1].message == "some info message"
    assert step.get_logs()[2].level == "warn"
    test = report.get_test("mysuite.test_2")
    assert test.status == "failed"
    step = test.get_steps()[0]
    assert step.get_logs()[0].message == "some error message"
    assert step.get_logs()[0].level == "error"
# set_step() creates distinct, ordered steps each holding its own logs.
def test_multiple_steps():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.set_step("step 1")
            lcc.log_info("do something")
            lcc.set_step("step 2")
            lcc.log_info("do something else")
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    steps = test.get_steps()
    assert steps[0].description == "step 1"
    assert steps[0].get_logs()[0].level == "info"
    assert steps[0].get_logs()[0].message == "do something"
    assert steps[1].description == "step 2"
    assert steps[1].get_logs()[0].level == "info"
    assert steps[1].get_logs()[0].message == "do something else"
# Steps set from separate lcc.Thread instances stay isolated per thread:
# each of the three threads ends up with its own single-log step.
def test_multiple_steps_on_different_threads():
    def thread_func(i):
        lcc.set_step(str(i))
        time.sleep(0.001)
        lcc.log_info(str(i))
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            threads = [lcc.Thread(target=thread_func, args=(i,)) for i in range(3)]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    remainings = list(range(3))
    steps = test.get_steps()
    for step in steps:
        remainings.remove(int(step.description))
        assert len(step.get_logs()) == 1
        assert step.get_logs()[0].message == step.description
    assert len(remainings) == 0
# A thread that logs without setting a step falls back to the test's default
# step (named after the test).
def test_thread_logging_without_explicit_step():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            thread = lcc.Thread(target=lambda: lcc.log_info("doing something"))
            thread.start()
            thread.join()
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    assert len(test.get_steps()) == 1
    step = test.get_steps()[0]
    assert step.description == "Some test"
    assert step.get_logs()[0].level == "info"
    assert "doing something" == step.get_logs()[0].message
# A thread created while "Step 1" was current logs into its own "Step 1"
# entry, even though the main thread has since moved on to "Step 2".
def test_thread_logging_without_detached_bis():
    def func():
        lcc.log_info("log in thread")
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.set_step("Step 1")
            lcc.log_info("log 1")
            thread = lcc.Thread(target=func)
            lcc.set_step("Step 2")
            lcc.log_info("log 2")
            thread.start()
            thread.join()
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    steps = test.get_steps()
    assert len(steps) == 3
    step = test.get_steps()[0]
    assert step.description == "Step 1"
    assert step.get_logs()[0].message == "log 1"
    step = test.get_steps()[1]
    assert step.description == "Step 2"
    assert step.get_logs()[0].message == "log 2"
    step = test.get_steps()[2]
    assert step.description == "Step 1"
    assert step.get_logs()[0].message == "log in thread"
# An exception raised inside an lcc.Thread fails the test and is appended to
# the step logs as an error entry.
def test_exception_in_thread():
    def thread_func():
        lcc.log_info("doing something")
        raise Exception("this_is_an_exception")
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            thread = lcc.Thread(target=thread_func)
            thread.start()
            thread.join()
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "failed"
    steps = test.get_steps()
    assert len(steps) == 1
    step = steps[0]
    assert step.description == "Some test"
    assert step.get_logs()[-1].level == "error"
    assert "this_is_an_exception" in step.get_logs()[-1].message
# Reusing a step name across threads produces separate step entries; each
# log lands in the step instance owned by the emitting thread.
def test_same_step_in_two_threads():
    def thread_func():
        lcc.set_step("step 2")
        lcc.log_info("log 2")
        time.sleep(0.001)
        lcc.set_step("step 1")
        lcc.log_info("log 3")
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.set_step("step 1")
            lcc.log_info("log 1")
            thread = lcc.Thread(target=thread_func)
            thread.start()
            lcc.log_info("log 4")
            thread.join()
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    steps = test.get_steps()
    assert len(steps) == 3
    step = steps[0]
    assert step.description == "step 1"
    assert len(step.get_logs()) == 2
    assert step.get_logs()[0].message == "log 1"
    assert step.get_logs()[1].message == "log 4"
    step = steps[1]
    assert step.description == "step 2"
    assert len(step.get_logs()) == 1
    assert step.get_logs()[0].message == "log 2"
    step = steps[2]
    assert step.description == "step 1"
    assert len(step.get_logs()) == 1
    assert step.get_logs()[0].message == "log 3"
# lcc.end_step() still works but must raise a DeprecationWarning.
def test_deprecated_end_step():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.set_step("step")
            lcc.log_info("log")
            lcc.end_step("step")
    with pytest.warns(DeprecationWarning, match="deprecated"):
        report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    step = test.get_steps()[0]
    assert step.description == "step"
    assert step.get_logs()[0].level == "info"
    assert step.get_logs()[0].message == "log"
# lcc.detached_step() still works but must raise a DeprecationWarning.
def test_deprecated_detached_step():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            with lcc.detached_step("step"):
                lcc.log_info("log")
    with pytest.warns(DeprecationWarning, match="deprecated"):
        report = run_suite_class(mysuite)
    test = get_last_test(report)
    step = test.get_steps()[0]
    assert test.status == "passed"
    assert step.description == "step"
    assert step.get_logs()[0].level == "info"
    assert step.get_logs()[0].message == "log"
# Without set_step(), logs go to a default step named after the test.
def test_default_step():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.log_info("do something")
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    step = test.get_steps()[0]
    assert step.description == "Some test"
    assert step.get_logs()[0].level == "info"
    assert step.get_logs()[0].message == "do something"
# Logs emitted in setup_test land in a dedicated "Setup test" step that
# precedes the test's own default step.
def test_step_after_test_setup():
    @lcc.suite("mysuite")
    class mysuite:
        def setup_test(self, test):
            lcc.log_info("in test setup")
        @lcc.test("Some test")
        def sometest(self):
            lcc.log_info("do something")
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    assert test.status == "passed"
    steps = test.get_steps()
    assert steps[0].description == "Setup test"
    assert steps[0].get_logs()[0].level == "info"
    assert steps[0].get_logs()[0].message == "in test setup"
    assert steps[1].description == "Some test"
    assert steps[1].get_logs()[0].level == "info"
    assert steps[1].get_logs()[0].message == "do something"
# prepare_attachment() yields a writable path that is recorded as a text
# attachment with the written content.
def test_prepare_attachment(tmpdir):
    def do():
        with lcc.prepare_attachment("foobar.txt", "some description") as filename:
            with open(filename, "w") as fh:
                fh.write("some content")
    report = run_func_in_test(do, tmpdir=tmpdir)
    assert_attachment(
        get_last_attachment(report), "foobar.txt", "some description", False, "some content", make_file_reader()
    )
# prepare_image_attachment() records the written file as an image attachment.
def test_prepare_image_attachment(tmpdir):
    def do():
        with lcc.prepare_image_attachment("foobar.png", "some description") as filename:
            with open(filename, "wb") as fh:
                fh.write(SAMPLE_IMAGE_CONTENT)
    report = run_func_in_test(do, tmpdir=tmpdir)
    assert_attachment(
        get_last_attachment(report), "foobar.png", "some description", True, SAMPLE_IMAGE_CONTENT,
        make_file_reader(binary=True)
    )
# save_attachment_file() copies an existing file into the report.
def test_save_attachment_file(tmpdir):
    def do():
        filename = osp.join(tmpdir.strpath, "somefile.txt")
        with open(filename, "w") as fh:
            fh.write("some other content")
        lcc.save_attachment_file(filename, "some other file")
    report = run_func_in_test(do, tmpdir=tmpdir.mkdir("report"))
    assert_attachment(
        get_last_attachment(report), "somefile.txt", "some other file", False, "some other content", make_file_reader()
    )
# save_image_file() copies an existing image file into the report.
def test_save_image_file(tmpdir):
    def do():
        lcc.save_image_file(SAMPLE_IMAGE_PATH, "some other file")
    report = run_func_in_test(do, tmpdir=tmpdir.mkdir("report"))
    assert_attachment(
        get_last_attachment(report), osp.basename(SAMPLE_IMAGE_PATH), "some other file", True, SAMPLE_IMAGE_CONTENT,
        make_file_reader(binary=True)
    )
# Shared driver: save raw content as an attachment and verify it round-trips
# through the report using the supplied file reader.
def _test_save_attachment_content(tmpdir, file_name, file_content, file_reader):
    def do():
        lcc.save_attachment_content(file_content, file_name)
    report = run_func_in_test(do, tmpdir=tmpdir)
    assert_attachment(get_last_attachment(report), file_name, file_name, False, file_content, file_reader)
# ASCII text content.
def test_save_attachment_text_ascii(tmpdir):
    _test_save_attachment_content(tmpdir, "foobar.txt", "foobar", make_file_reader())
# Non-ASCII (UTF-8) text content.
def test_save_attachment_text_utf8(tmpdir):
    _test_save_attachment_content(tmpdir, "foobar.txt", u"éééçççààà", make_file_reader(encoding="utf-8"))
# Binary (PNG) content.
def test_save_attachment_binary(tmpdir):
    _test_save_attachment_content(tmpdir, "foobar.png", SAMPLE_IMAGE_CONTENT, make_file_reader(binary=True))
# save_image_content() stores raw bytes as an image attachment.
def test_save_image_content(tmpdir):
    def do():
        lcc.save_image_content(SAMPLE_IMAGE_CONTENT, "somefile.png", "some file")
    report = run_func_in_test(do, tmpdir=tmpdir)
    assert_attachment(
        get_last_attachment(report), "somefile.png", "some file", True, SAMPLE_IMAGE_CONTENT,
        make_file_reader(binary=True)
    )
# log_url() records both the URL and its display description.
def test_log_url():
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.log_url("http://example.com", "example")
    report = run_suite_class(mysuite)
    test = get_last_test(report)
    step = test.get_steps()[0]
    assert step.get_logs()[0].description == "example"
    assert step.get_logs()[0].url == "http://example.com"
# Non-ASCII text must survive step names, checks, log messages and
# attachment names/descriptions.
def test_unicode(tmpdir):
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("some test")
        def sometest(self):
            lcc.set_step(u"éééààà")
            check_that(u"éééààà", 1, equal_to(1))
            lcc.log_info(u"éééààà")
            lcc.save_attachment_content("A" * 1024, u"somefileààà", u"éééààà")
    report = run_suite_class(mysuite, tmpdir=tmpdir)
    test = get_last_test(report)
    assert test.status == "passed"
    step = test.get_steps()[0]
    assert step.description == u"éééààà"
    assert u"éééààà" in step.get_logs()[0].description
    assert "1" in step.get_logs()[0].description
    assert step.get_logs()[1].message == u"éééààà"
    assert_attachment(step.get_logs()[2], u"somefileààà", u"éééààà", False, "A" * 1024, make_file_reader(encoding="utf8"))
def test_setup_suite_success():
@lcc.suite("MySuite")
class mysuite:
def setup_suite(self):
lcc.log_info("some log")
@lcc.test("Some test")
def sometest(self):
pass
report = run_suite_class(mysuite)
setup = _get_suite_setup(report)
assert setup.status == "passed"
assert setup.start_time is not None
assert setup.end_time is not None
assert setup.get_steps()[0].get_logs()[0].message == "some log"
assert setup.is_successful()
def test_setup_suite_failure():
    """A setup_suite that logs an error must mark the suite setup as failed."""
    @lcc.suite("MySuite")
    class mysuite:
        def setup_suite(self):
            lcc.log_error("something bad happened")

        @lcc.test("Some test")
        def sometest(self):
            pass

    setup = _get_suite_setup(run_suite_class(mysuite))
    assert setup.status == "failed"
    assert not setup.is_successful()
    assert setup.start_time is not None and setup.end_time is not None
    assert setup.get_steps()[0].get_logs()[0].message == "something bad happened"
def test_setup_suite_without_content():
    """An empty setup_suite must not show up in the report at all."""
    @lcc.suite("MySuite")
    class mysuite:
        def setup_suite(self):
            pass

        @lcc.test("Some test")
        def sometest(self):
            pass

    assert _get_suite_setup(run_suite_class(mysuite)) is None
def test_teardown_suite_success():
    """A passing teardown_suite must be reported with status, timings and logs."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            pass

        def teardown_suite(self):
            lcc.log_info("some log")

    teardown = _get_suite_teardown(run_suite_class(mysuite))
    assert teardown.status == "passed"
    assert teardown.is_successful()
    assert teardown.start_time is not None and teardown.end_time is not None
    assert teardown.get_steps()[0].get_logs()[0].message == "some log"
def test_teardown_suite_failure():
    """A failed check in teardown_suite must mark the suite teardown as failed."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            pass

        def teardown_suite(self):
            check_that("val", 1, equal_to(2))

    teardown = _get_suite_teardown(run_suite_class(mysuite))
    assert teardown.status == "failed"
    assert not teardown.is_successful()
    assert teardown.start_time is not None and teardown.end_time is not None
    assert teardown.get_steps()[0].get_logs()[0].is_successful is False
def test_teardown_suite_without_content():
    """An empty teardown_suite must not show up in the report at all."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            pass

        def teardown_suite(self):
            pass

    assert _get_suite_teardown(run_suite_class(mysuite)) is None
def test_setup_test_session_success():
    """A session fixture that logs must produce a passed session-setup entry."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self, fixt):
            pass

    @lcc.fixture(scope="session")
    def fixt():
        lcc.log_info("some log")

    setup = run_suite_class(mysuite, fixtures=[fixt]).test_session_setup
    assert setup.status == "passed"
    assert setup.is_successful()
    assert setup.start_time is not None and setup.end_time is not None
    assert setup.get_steps()[0].get_logs()[0].message == "some log"
def test_setup_test_session_failure():
    """A session fixture that logs an error must mark the session setup as failed."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self, fixt):
            pass

    @lcc.fixture(scope="session")
    def fixt():
        lcc.log_error("something bad happened")

    setup = run_suite_class(mysuite, fixtures=[fixt]).test_session_setup
    assert setup.status == "failed"
    assert not setup.is_successful()
    assert setup.start_time is not None and setup.end_time is not None
    assert setup.get_steps()[0].get_logs()[0].message == "something bad happened"
def test_setup_test_session_without_content():
    """A session fixture that does nothing must not create a session-setup entry."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self, fixt):
            pass

    @lcc.fixture(scope="session")
    def fixt():
        pass

    assert run_suite_class(mysuite, fixtures=[fixt]).test_session_setup is None
def test_teardown_test_session_success():
    """Logging after a session fixture's yield must produce a passed teardown."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self, fixt):
            pass

    @lcc.fixture(scope="session")
    def fixt():
        yield
        lcc.log_info("some log")

    teardown = run_suite_class(mysuite, fixtures=[fixt]).test_session_teardown
    assert teardown.status == "passed"
    assert teardown.is_successful()
    assert teardown.start_time is not None and teardown.end_time is not None
    assert teardown.get_steps()[0].get_logs()[0].message == "some log"
def test_teardown_test_session_failure():
    """A failed check after a session fixture's yield must fail the teardown."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self, fixt):
            pass

    @lcc.fixture(scope="session")
    def fixt():
        yield
        check_that("val", 1, equal_to(2))

    teardown = run_suite_class(mysuite, fixtures=[fixt]).test_session_teardown
    assert teardown.status == "failed"
    assert not teardown.is_successful()
    assert teardown.start_time is not None and teardown.end_time is not None
    assert teardown.get_steps()[0].get_logs()[0].is_successful is False
def test_teardown_test_session_without_content():
    """A session fixture with nothing after its yield must not create a teardown entry."""
    @lcc.suite("MySuite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self, fixt):
            pass

    @lcc.fixture(scope="session")
    def fixt():
        yield

    assert run_suite_class(mysuite, fixtures=[fixt]).test_session_teardown is None
def test_add_report_info():
    """lcc.add_report_info must append a [label, value] entry to report.info."""
    @lcc.suite("Some suite")
    class mysuite:
        @lcc.test("Some test")
        def sometest(self):
            lcc.add_report_info("some info", "some data")

    result = run_suite_class(mysuite)
    assert result.info[-1] == ["some info", "some data"]
| 27.603666
| 122
| 0.632086
| 3,221
| 24,098
| 4.519093
| 0.063645
| 0.03222
| 0.030228
| 0.032152
| 0.8018
| 0.762572
| 0.729321
| 0.691261
| 0.669964
| 0.653476
| 0
| 0.012425
| 0.238526
| 24,098
| 872
| 123
| 27.635321
| 0.780817
| 0.003942
| 0
| 0.683721
| 0
| 0
| 0.102151
| 0
| 0
| 0
| 0
| 0
| 0.235659
| 1
| 0.176744
| false
| 0.065116
| 0.012403
| 0.00155
| 0.252713
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
40f56b6672d61b45315a677a55d834c92ed3b91e
| 110
|
py
|
Python
|
quand.py
|
gaoyan0629/ml
|
c1ad3a3a6bdbe75e5911e8946bdf73d6cf14c3c8
|
[
"Apache-2.0"
] | null | null | null |
quand.py
|
gaoyan0629/ml
|
c1ad3a3a6bdbe75e5911e8946bdf73d6cf14c3c8
|
[
"Apache-2.0"
] | null | null | null |
quand.py
|
gaoyan0629/ml
|
c1ad3a3a6bdbe75e5911e8946bdf73d6cf14c3c8
|
[
"Apache-2.0"
] | null | null | null |
# Fetch Dow Jones index data from Quandl for the first week of December 2005.
import quandl

mydata = quandl.get(
    "YAHOO/INDEX_DJI",
    start_date="2005-12-01",
    end_date="2005-12-05",
)
| 27.5
| 55
| 0.681818
| 18
| 110
| 4
| 0.777778
| 0.222222
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 0.145455
| 110
| 3
| 56
| 36.666667
| 0.595745
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dc0e01e5c601a01f2950a7352ea2398fa5d1ff2e
| 285
|
py
|
Python
|
qtrader/agents/random.py
|
aaron8tang/qtrader
|
9bd50fd173c7b55707e91d75985055bbe8664548
|
[
"Apache-2.0"
] | 381
|
2017-10-25T19:17:04.000Z
|
2021-03-02T08:46:53.000Z
|
qtrader/agents/random.py
|
362115815/qtrader
|
e5c1e175e19b20381f9140fb76c30ad5cb81f01c
|
[
"Apache-2.0"
] | 3
|
2018-02-13T23:19:40.000Z
|
2018-12-03T22:50:58.000Z
|
qtrader/agents/random.py
|
362115815/qtrader
|
e5c1e175e19b20381f9140fb76c30ad5cb81f01c
|
[
"Apache-2.0"
] | 145
|
2017-10-25T19:17:06.000Z
|
2021-02-15T04:54:08.000Z
|
import numpy as np
from qtrader.agents.base import Agent
class RandomAgent(Agent):
    """Agent that ignores its observations and samples uniformly at random
    from the configured action space."""

    # Registry identifier for this agent type.
    _id = 'random'

    def __init__(self, action_space):
        # Space whose .sample() provides the random actions.
        self.action_space = action_space

    def act(self, observation):
        """Return a random action; *observation* is deliberately unused."""
        return self.action_space.sample()
| 17.8125
| 41
| 0.673684
| 36
| 285
| 5.083333
| 0.611111
| 0.240437
| 0.245902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224561
| 285
| 15
| 42
| 19
| 0.828054
| 0.045614
| 0
| 0
| 0
| 0
| 0.022556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.125
| 0.875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9074a91f6d1cd93585f26530bb8637feb3d5a91f
| 109
|
py
|
Python
|
avatar/signals.py
|
davmlaw/django-avatar
|
a634433a273f2d24c0a476c73943dfaa59eb0fd6
|
[
"BSD-3-Clause"
] | null | null | null |
avatar/signals.py
|
davmlaw/django-avatar
|
a634433a273f2d24c0a476c73943dfaa59eb0fd6
|
[
"BSD-3-Clause"
] | null | null | null |
avatar/signals.py
|
davmlaw/django-avatar
|
a634433a273f2d24c0a476c73943dfaa59eb0fd6
|
[
"BSD-3-Clause"
] | null | null | null |
import django.dispatch
# Signal presumably sent after an avatar is created/updated -- the senders
# are not visible in this module; confirm against the views that send it.
avatar_updated = django.dispatch.Signal()
# Signal presumably sent after an avatar is deleted (same caveat as above).
avatar_deleted = django.dispatch.Signal()
| 18.166667
| 41
| 0.807339
| 13
| 109
| 6.615385
| 0.538462
| 0.488372
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091743
| 109
| 5
| 42
| 21.8
| 0.868687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
90a87194ba14bd6a3d01f3dabd02bf63a4148a42
| 5,867
|
py
|
Python
|
models/.ipynb_checkpoints/model_architecture-checkpoint.py
|
punyajoy/Fear-speech-analysis
|
a20f0032eee0114aac04710a5961f395bf0e4d59
|
[
"MIT"
] | 10
|
2021-02-07T19:41:01.000Z
|
2021-02-28T08:07:07.000Z
|
models/.ipynb_checkpoints/model_architecture-checkpoint.py
|
punyajoy/Fear-speech-analysis
|
a20f0032eee0114aac04710a5961f395bf0e4d59
|
[
"MIT"
] | 1
|
2021-02-07T18:28:25.000Z
|
2021-02-07T18:28:25.000Z
|
models/.ipynb_checkpoints/model_architecture-checkpoint.py
|
punyajoy/Fear-speech-analysis
|
a20f0032eee0114aac04710a5961f395bf0e4d59
|
[
"MIT"
] | 2
|
2021-02-07T19:40:49.000Z
|
2021-02-20T17:08:01.000Z
|
#from transformers import BertForSequenceClassification,BertPreTrainedModel,RobertaPreTrainedModel,XLMRobertaForSequenceClassification
from transformers.modeling_bert import *
from transformers.modeling_roberta import *
from transformers.modeling_xlm_roberta import *
from torch import nn
import torch
from torch.nn import LSTM
def select_transformer_model(type_of_model, path, params):
    """Instantiate a document classifier for the given model type and checkpoint.

    :param type_of_model: only 'lstm_transformer' is supported
    :param path: HF checkpoint name; 'bert-base-multilingual-cased' or
        'xlm-roberta-base'
    :param params: dict forwarded to the model's constructor
        (expects 'batch_size', 'weights', 'max_sentences_per_doc')
    :return: the instantiated model
    :raises ValueError: for an unsupported type/path combination.  The
        original code fell through and crashed with UnboundLocalError on
        ``return model`` instead of reporting the bad input.
    """
    if type_of_model == 'lstm_transformer':
        if path == 'bert-base-multilingual-cased':
            # num_labels=2: binary classification head.
            return DocumentBERTLSTM.from_pretrained(path, num_labels=2, params=params)
        if path == 'xlm-roberta-base':
            return DocumentRobertaLSTM.from_pretrained(path, num_labels=2, params=params)
    raise ValueError(
        "unsupported model selection: type_of_model={!r}, path={!r}".format(type_of_model, path)
    )
class DocumentBERTLSTM(BertPreTrainedModel):
    """
    BERT output over document in LSTM

    Encodes up to ``max_sentences_per_doc`` sentences of each document with
    BERT, feeds the pooled outputs through an LSTM, and classifies from the
    last LSTM state.  ``params`` must provide 'batch_size', 'weights' and
    'max_sentences_per_doc'.
    """
    def __init__(self,config,params):
        super(DocumentBERTLSTM, self).__init__(config)
        self.bert = BertModel(config)
        print(params)
        # Number of target classes, taken from the transformer config.
        self.num_labels = config.num_labels
        self.batch_size= params['batch_size']
        # Per-class weights for the optional CrossEntropyLoss in forward().
        self.weights=params['weights']
        # Max sentences per document that are actually passed through BERT.
        self.bert_batch_size=params['max_sentences_per_doc']
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.lstm = LSTM(config.hidden_size,config.hidden_size)
        self.classifier = nn.Sequential(
            nn.Dropout(p=config.hidden_dropout_prob),
            nn.Linear(config.hidden_size, config.num_labels),
            nn.Tanh()
        )
        # NOTE(review): unlike DocumentRobertaLSTM, self.init_weights() is
        # never called here -- confirm whether that is intentional.

    #input_ids, token_type_ids, attention_masks
    def forward(self, document_batch, labels= None,device='cuda'):
        """Run a batch of documents through BERT + LSTM + classifier.

        ``document_batch`` is indexed as [doc, sentence, field] with fields
        0/1/2 used below as input_ids / token_type_ids / attention_mask
        (assumed from the indexing -- TODO confirm against the caller).
        Returns the LSTM output, or ``[loss, lstm_output]`` when ``labels``
        is given.
        """
        #contains all BERT sequences
        #bert should output a (batch_size, num_sequences, bert_hidden_size)
        bert_output = torch.zeros(size=(document_batch.shape[0],
                                        min(document_batch.shape[1],self.bert_batch_size),
                                        self.bert.config.hidden_size), dtype=torch.float, device=device)
        #only pass through bert_batch_size numbers of inputs into bert.
        #this means that we are possibly cutting off the last part of documents.
        for doc_id in range(document_batch.shape[0]):
            # [1] selects BERT's pooled output for each sentence.
            bert_output[doc_id][:self.bert_batch_size] = self.dropout(self.bert(document_batch[doc_id][:self.bert_batch_size,0],
                                            token_type_ids=document_batch[doc_id][:self.bert_batch_size,1],
                                            attention_mask=document_batch[doc_id][:self.bert_batch_size,2])[1])
        # permute to (num_sentences, batch, hidden) -- the layout LSTM expects.
        output, (_, _) = self.lstm(bert_output.permute(1,0,2))
        last_layer = output[-1]
        prediction = self.classifier(last_layer)
        assert prediction.shape[0] == document_batch.shape[0]
        if labels is not None:
            loss_funct = CrossEntropyLoss(weight=torch.tensor(self.weights).to(device))
            loss_logits = loss_funct(prediction.view(-1, self.num_labels), labels.view(-1))
            loss= loss_logits
            output = [loss, output]
        # NOTE(review): with labels=None this returns the raw LSTM output,
        # not `prediction` -- looks suspicious; confirm callers expect this.
        return output
class DocumentRobertaLSTM(RobertaPreTrainedModel):
    """
    Roberta output over document in LSTM

    Mirror of DocumentBERTLSTM with a Roberta encoder: each document's
    sentences are encoded, pooled outputs go through an LSTM, and the last
    LSTM state is classified.  ``params`` must provide 'batch_size',
    'weights' and 'max_sentences_per_doc'.
    """
    def __init__(self, config, params):
        super().__init__(config)
        # Number of target classes, taken from the transformer config.
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        self.batch_size = params['batch_size']
        # Per-class weights for the optional CrossEntropyLoss in forward().
        self.weights = params['weights']
        # Max sentences per document that are actually passed through Roberta.
        self.bert_batch_size = params['max_sentences_per_doc']
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.lstm = LSTM(config.hidden_size, config.hidden_size)
        self.classifier = nn.Sequential(
            nn.Dropout(p=config.hidden_dropout_prob),
            nn.Linear(config.hidden_size, config.num_labels),
            nn.Tanh()
        )
        self.init_weights()

    # input_ids, token_type_ids, attention_masks
    def forward(self, document_batch, labels=None, device='cuda'):
        """Run a batch of documents through Roberta + LSTM + classifier.

        ``document_batch`` is indexed as [doc, sentence, field] with fields
        0/1/2 used below as input_ids / token_type_ids / attention_mask
        (assumed from the indexing -- TODO confirm against the caller).
        Returns the LSTM output, or ``[loss, lstm_output]`` when ``labels``
        is given.
        """
        # Buffer for the pooled per-sentence outputs.
        # NOTE(review): the BERT sibling initialises this with zeros; ones is
        # kept here to preserve existing behaviour -- confirm which is intended.
        bert_output = torch.ones(size=(document_batch.shape[0],
                                       min(document_batch.shape[1], self.bert_batch_size),
                                       self.roberta.config.hidden_size),
                                 dtype=torch.float, device=device)
        # Only the first bert_batch_size sentences of each document are
        # encoded; the tail of longer documents is cut off.
        for doc_id in range(document_batch.shape[0]):
            encoded = self.roberta(document_batch[doc_id][:self.bert_batch_size, 0],
                                   token_type_ids=document_batch[doc_id][:self.bert_batch_size, 1],
                                   attention_mask=document_batch[doc_id][:self.bert_batch_size, 2])
            # Bug fix: the old debug print did `temp.shape` on the tuple the
            # encoder returns (AttributeError).  The debug prints are removed;
            # [1] selects the pooled output, as in DocumentBERTLSTM.
            bert_output[doc_id][:self.bert_batch_size] = self.dropout(encoded[1])
        # permute to (num_sentences, batch, hidden) -- the layout LSTM expects.
        output, (_, _) = self.lstm(bert_output.permute(1, 0, 2))
        last_layer = output[-1]
        prediction = self.classifier(last_layer)
        assert prediction.shape[0] == document_batch.shape[0]
        if labels is not None:
            loss_funct = CrossEntropyLoss(weight=torch.tensor(self.weights).to(device))
            loss_logits = loss_funct(prediction.view(-1, self.num_labels), labels.view(-1))
            loss = loss_logits
            # Bug fix: the original did `outputs = (loss,) + outputs` with
            # `outputs` never defined (NameError on the labelled path); match
            # the DocumentBERTLSTM sibling instead.
            output = [loss, output]
        return output
| 42.208633
| 148
| 0.633373
| 703
| 5,867
| 5.051209
| 0.192034
| 0.05069
| 0.051253
| 0.057449
| 0.73782
| 0.73782
| 0.73782
| 0.720924
| 0.720924
| 0.701774
| 0
| 0.008661
| 0.27186
| 5,867
| 139
| 149
| 42.208633
| 0.822566
| 0.163968
| 0
| 0.565217
| 0
| 0
| 0.029624
| 0.0144
| 0
| 0
| 0
| 0
| 0.021739
| 1
| 0.054348
| false
| 0
| 0.065217
| 0
| 0.173913
| 0.032609
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
90daff38b261653bbebc6b1dc07df60be513b782
| 13
|
py
|
Python
|
Module00/myfirst-python.py
|
geiyer/cis189-python
|
2b85ff66277b337aab9f6a7a6fa1c86dccf0178c
|
[
"MIT"
] | 2
|
2021-02-24T00:32:36.000Z
|
2021-04-21T00:09:36.000Z
|
Module00/myfirst-python.py
|
geiyer/cis189-python
|
2b85ff66277b337aab9f6a7a6fa1c86dccf0178c
|
[
"MIT"
] | null | null | null |
Module00/myfirst-python.py
|
geiyer/cis189-python
|
2b85ff66277b337aab9f6a7a6fa1c86dccf0178c
|
[
"MIT"
] | 2
|
2021-03-30T23:37:49.000Z
|
2021-04-21T00:08:32.000Z
|
# Minimal "first program": write the author's name to stdout.
print("gopi")
| 13
| 13
| 0.692308
| 2
| 13
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 1
| 13
| 13
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
90eac991fed069567cc92ab449b4869d566e3e75
| 48
|
py
|
Python
|
converty/image_convert/__init__.py
|
sharmas1ddharth/converty
|
eb32dfc1882a7c20d57916287c10e154adbe67d9
|
[
"MIT"
] | null | null | null |
converty/image_convert/__init__.py
|
sharmas1ddharth/converty
|
eb32dfc1882a7c20d57916287c10e154adbe67d9
|
[
"MIT"
] | null | null | null |
converty/image_convert/__init__.py
|
sharmas1ddharth/converty
|
eb32dfc1882a7c20d57916287c10e154adbe67d9
|
[
"MIT"
] | null | null | null |
#from img_convert import jpg_to_png, png_to_jpg
| 24
| 47
| 0.854167
| 10
| 48
| 3.6
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 1
| 48
| 48
| 0.837209
| 0.958333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2903d5d69b9135292976689b8bee6d75b64c1f3f
| 10,412
|
py
|
Python
|
spytest/tests/routing/BGP/bgp4nodelib.py
|
mykolaf/sonic-mgmt
|
de77268526173c5e3a345f3f3703b56eb40c5eed
|
[
"Apache-2.0"
] | 1
|
2021-09-15T17:09:13.000Z
|
2021-09-15T17:09:13.000Z
|
spytest/tests/routing/BGP/bgp4nodelib.py
|
mykolaf/sonic-mgmt
|
de77268526173c5e3a345f3f3703b56eb40c5eed
|
[
"Apache-2.0"
] | 1
|
2020-02-05T16:51:53.000Z
|
2020-02-05T16:51:53.000Z
|
spytest/tests/routing/BGP/bgp4nodelib.py
|
mykolaf/sonic-mgmt
|
de77268526173c5e3a345f3f3703b56eb40c5eed
|
[
"Apache-2.0"
] | null | null | null |
#BGP 4 node linear topology
from spytest import st
from spytest.dicts import SpyTestDict
import apis.routing.ip as ipapi
import apis.routing.bgp as bgpapi
import utilities.common as utils
import BGP.bgplib as bgplib
# NOTE(review): `global` at module level is a no-op statement; kept as-is.
global topo
# Shared topology dictionary (ports, addresses, AS numbers) populated by the
# config functions below and read by the check/ping helpers.
topo = SpyTestDict()
def l3_ipv4v6_address_config_unconfig(config='yes', vrf_type='all', config_type='all'):
    """
    Configure (or unconfigure) IPv4/IPv6 addresses on the first link between
    each adjacent DUT pair of the linear topology, recording ports and
    addresses in the module-level ``topo`` dict.

    :param config: 'yes' to add addresses, anything else to remove them
    :param vrf_type: unused in this function; kept for API symmetry
    :param config_type: 'ipv4', 'ipv6' or 'all'
    :return: True (always; per-call errors are only logged)
    """
    st.banner("{}Configuring IP Addresses between linear topology nodes.".format('Un' if config != 'yes' else ''))
    tb_vars = st.get_testbed_vars()
    st.log("TestBed Vars => {}\n".format(tb_vars))
    topo['dut_list'] = tb_vars.dut_list
    st.log("topo dut_list {}".format(topo['dut_list']))
    config = 'add' if config == 'yes' else 'remove'
    # Address seeds: IPv4 subnets are 11.<k>.0.0/24, IPv6 are 67fe:<k>::/64.
    ipv4_adr = '11'
    ipv6_adr = '67fe'
    result = True
    k = 1
    i=0
    while i < (len(topo['dut_list']) - 1):
        dut = topo['dut_list'][i]
        peer_dut = topo['dut_list'][i+1]
        link = 1
        for local, partner, remote in st.get_dut_links(dut, peer_dut):
            if config_type == 'ipv4' or config_type == 'all':
                ipaddr1 = "{}.{}.0.1".format(ipv4_adr, k)
                ipaddr2 = "{}.{}.0.2".format(ipv4_adr, k)
                # Record local/remote ports and both ends' addresses under
                # keys like 'D1D2P1', 'D1D2P1_ipv4', 'D1D2P1_neigh_ipv4'.
                topo['D{}D{}P{}'.format(i+1,i+2,link)] = local
                topo['D{}D{}P{}_ipv4'.format(i+1,i+2,link)] = ipaddr1
                topo['D{}D{}P{}_neigh_ipv4'.format(i+1,i+2,link)] = ipaddr2
                topo['D{}D{}P{}'.format(i+2,i+1,link)] = remote
                topo['D{}D{}P{}_ipv4'.format(i+2,i+1,link)] = ipaddr2
                topo['D{}D{}P{}_neigh_ipv4'.format(i+2,i+1,link)] = ipaddr1
                # Configure both ends of the link in parallel.
                [out, exceptions] = utils.exec_all(bgplib.fast_start,[[ipapi.config_ip_addr_interface, dut, local, ipaddr1, '24', "ipv4", config],[ipapi.config_ip_addr_interface, peer_dut, remote, ipaddr2,'24', "ipv4", config]])
                st.log([out, exceptions])
            if config_type == 'ipv6' or config_type == 'all':
                ip6addr1 = "{}:{}::1".format(ipv6_adr, k)
                ip6addr2 = "{}:{}::2".format(ipv6_adr, k)
                topo['D{}D{}P{}'.format(i+1,i+2,link)] = local
                topo['D{}D{}P{}_ipv6'.format(i+1,i+2,link)] = ip6addr1
                topo['D{}D{}P{}_neigh_ipv6'.format(i+1,i+2,link)] = ip6addr2
                topo['D{}D{}P{}'.format(i+2,i+1,link)] = remote
                topo['D{}D{}P{}_ipv6'.format(i+2,i+1,link)] = ip6addr2
                topo['D{}D{}P{}_neigh_ipv6'.format(i+2,i+1,link)] = ip6addr1
                [out, exceptions] = utils.exec_all(bgplib.fast_start,[[ipapi.config_ip_addr_interface, dut, local, ip6addr1, '64', "ipv6", config],[ipapi.config_ip_addr_interface, peer_dut, remote, ip6addr2,'64', "ipv6", config]])
                st.log([out, exceptions])
            link += 1
            # Only the first link between each DUT pair is configured.
            break
        k += 1
        i += 1
    return result
def l3tc_vrfipv4v6_address_ping_test(vrf_type='all', config_type='all', ping_count=3):
    """
    Ping the peer-side address of every configured link in the linear topology.

    :param vrf_type: unused here; kept for API symmetry with the config function
    :param config_type: 'ipv4', 'ipv6' or 'all'
    :param ping_count: number of echo requests per ping
    :return: True if every ping succeeded, False otherwise
    """
    st.banner("Ping Checking between Spine and Leaf nodes.")
    # Same address seeds as l3_ipv4v6_address_config_unconfig.
    ipv4_adr = '11'
    ipv6_adr = '67fe'
    result = True
    k = 1
    i = 0
    while i < (len(topo['dut_list']) - 1):
        dut = topo['dut_list'][i]
        peer_dut = topo['dut_list'][i + 1]
        link = 1
        for local, partner, remote in st.get_dut_links(dut, peer_dut):
            if config_type == 'ipv4' or config_type == 'all':
                # Peer end of the 11.<k>.0.0/24 subnet; the local-end address
                # computed by the original code was never used and is dropped.
                ipaddr2 = "{}.{}.0.2".format(ipv4_adr, k)
                if not ipapi.ping(dut, ipaddr2, family='ipv4', count=ping_count):
                    st.log("{}- {} configured on {} - ping failed".format(dut, local, ipaddr2))
                    result = False
            if config_type == 'ipv6' or config_type == 'all':
                # Peer end of the 67fe:<k>::/64 subnet.
                ip6addr2 = "{}:{}::2".format(ipv6_adr, k)
                if not ipapi.ping(dut, ip6addr2, family='ipv6', count=ping_count):
                    st.log("{}- {} configured on {} - ping v6 failed".format(dut, local, ip6addr2))
                    result = False
            link += 1
            # Only the first link between each DUT pair is checked.
            break
        k += 1
        i += 1
    return result
def l3tc_vrfipv4v6_confed_bgp_config(config='yes', vrf_type='all', config_type='all'):
    """
    Configure (or clean up) BGP confederation on the 4-node linear topology:
    DUT1 alone in sub-AS 24, DUT2/DUT3/DUT4 in sub-AS 35, confederation
    identifier 100.

    :param config: 'yes' to configure, anything else to clean up BGP entirely
    :param vrf_type: unused in this function; kept for API symmetry
    :param config_type: 'ipv4', 'ipv6' or 'all'
    :return: True (always; this function does not verify the sessions)
    """
    st.banner("{}Configuring BGP with 4-node confederation topology.".format('Un' if config != 'yes' else ''))
    #Confedration topo:
    #DUT1 in sub-AS1 (AS1 = 24)
    #DUT2, DUT3, DUT 4 in sub-AS2 (AS2 = 35)
    #IBGP AS = 100
    config = 'add' if config == 'yes' else 'remove'
    leftconfed_as = 24
    rightconfed_as = 35
    iBGP_as = 100
    # Record each DUT's sub-AS in the shared topology dict.
    topo['D1_as'] = 24
    topo['D2_as'] = 35
    topo['D3_as'] = 35
    topo['D4_as'] = 35
    result = True
    if config == 'add':
        if config_type == 'ipv4' or config_type == 'all':
            #Confederation config for DUT1
            dut = topo['dut_list'][0]
            neighbor = topo['D1D2P1_neigh_ipv4']
            bgpapi.config_bgp(dut, local_as = leftconfed_as, config = 'yes', conf_peers = rightconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor"], neighbor = neighbor)
            #Confederation config for DUT2
            dut = topo['dut_list'][1]
            neighbor = topo['D2D3P1_neigh_ipv4']
            bgpapi.config_bgp(dut, local_as = rightconfed_as, config = 'yes', conf_peers = leftconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor"], neighbor = neighbor)
            # DUT2 also peers back to DUT1 across the sub-AS boundary.
            bgpapi.create_bgp_neighbor(dut, rightconfed_as, topo['D2D1P1_neigh_ipv4'], leftconfed_as)
            #Confederation config for DUT3
            dut = topo['dut_list'][2]
            neighbor = topo['D3D4P1_neigh_ipv4']
            bgpapi.config_bgp(dut, local_as = rightconfed_as, config = 'yes', conf_peers = leftconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor"], neighbor = neighbor)
            bgpapi.create_bgp_neighbor(dut, rightconfed_as, topo['D3D2P1_neigh_ipv4'], rightconfed_as)
            #Confederation config for DUT4
            dut = topo['dut_list'][3]
            neighbor = topo['D4D3P1_neigh_ipv4']
            bgpapi.config_bgp(dut, local_as = rightconfed_as, config = 'yes', conf_peers = leftconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor"], neighbor = neighbor)
        if config_type == 'ipv6' or config_type == 'all':
            # Same topology for IPv6; "activate" enables the v6 address family.
            #Confederation config for DUT1
            dut = topo['dut_list'][0]
            neighbor = topo['D1D2P1_neigh_ipv6']
            bgpapi.config_bgp(dut, local_as = leftconfed_as, config = 'yes', addr_family ='ipv6', conf_peers = rightconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor", "activate"], neighbor = neighbor)
            #Confederation config for DUT2
            dut = topo['dut_list'][1]
            neighbor = topo['D2D3P1_neigh_ipv6']
            bgpapi.config_bgp(dut, local_as = rightconfed_as, config = 'yes', addr_family ='ipv6', conf_peers = leftconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor", "activate"], neighbor = neighbor)
            bgpapi.create_bgp_neighbor(dut, rightconfed_as, topo['D2D1P1_neigh_ipv6'], leftconfed_as, family="ipv6")
            #Confederation config for DUT3
            dut = topo['dut_list'][2]
            neighbor = topo['D3D4P1_neigh_ipv6']
            bgpapi.config_bgp(dut, local_as = rightconfed_as, config = 'yes', addr_family ='ipv6', conf_peers = leftconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor", "activate"], neighbor = neighbor)
            bgpapi.create_bgp_neighbor(dut, rightconfed_as, topo['D3D2P1_neigh_ipv6'], rightconfed_as, family="ipv6")
            #Confederation config for DUT4
            dut = topo['dut_list'][3]
            neighbor = topo['D4D3P1_neigh_ipv6']
            bgpapi.config_bgp(dut, local_as = rightconfed_as, config = 'yes', addr_family ='ipv6', conf_peers = leftconfed_as, conf_identf = iBGP_as, remote_as = rightconfed_as, config_type_list = ["neighbor", "activate"], neighbor = neighbor)
    else:
        # Unconfigure path: remove BGP from every DUT.
        bgpapi.cleanup_router_bgp(topo['dut_list'])
    return result
def l3tc_vrfipv4v6_address_confed_bgp_check(config_type='all'):
    """
    Verify that the confederation BGP sessions reached Established state.

    :param config_type: 'ipv4', 'ipv6' or 'all'
    :return: True if all checked sessions are Established, False otherwise
    """
    st.banner("BGP Neighbor Checking in confederation topology")
    result = True
    if config_type == 'ipv4' or config_type == 'all':
        #Check link between DUT 1----DUT2 and DUT2----DUT3
        neigh_list = []
        neigh_list.append(topo['D2D3P1_neigh_ipv4'])
        neigh_list.append(topo['D2D1P1_neigh_ipv4'])
        # Deduplicate in case both links resolved to the same neighbor address.
        neigh_list = list(set(neigh_list))
        if not bgpapi.verify_bgp_summary(topo['dut_list'][1], family='ipv4', neighbor=neigh_list, state='Established'):
            st.log("{} - Neighbor {} is failed to Establish".format(topo['dut_list'][1], neigh_list))
            result = False
        #Check link between DUT3----DUT4
        if not bgpapi.verify_bgp_summary(topo['dut_list'][2], family='ipv4', neighbor=topo['D3D4P1_neigh_ipv4'], state='Established'):
            st.log("{} - Neighbor {} is failed to Establish".format(topo['dut_list'][2], topo['D3D4P1_neigh_ipv4']))
            result = False
    if config_type == 'ipv6' or config_type == 'all':
        #Check link between DUT 1----DUT2 and DUT2----DUT3
        neigh_list = []
        neigh_list.append(topo['D2D3P1_neigh_ipv6'])
        neigh_list.append(topo['D2D1P1_neigh_ipv6'])
        neigh_list = list(set(neigh_list))
        if not bgpapi.verify_bgp_summary(topo['dut_list'][1], family='ipv6', neighbor=neigh_list, state='Established'):
            st.log("{} - Neighbor {} is failed to Establish".format(topo['dut_list'][1], neigh_list))
            result = False
        #Check link between DUT3----DUT4
        if not bgpapi.verify_bgp_summary(topo['dut_list'][2], family='ipv6', neighbor=topo['D3D4P1_neigh_ipv6'], state='Established'):
            st.log("{} - Neighbor {} is failed to Establish".format(topo['dut_list'][2], topo['D3D4P1_neigh_ipv6']))
            result = False
    return result
def get_confed_topology_info():
    """Return the module-level topology dict built by the config helpers."""
    return topo
| 46.690583
| 243
| 0.608817
| 1,388
| 10,412
| 4.340778
| 0.112392
| 0.051452
| 0.047469
| 0.048797
| 0.809627
| 0.792697
| 0.750871
| 0.718672
| 0.680166
| 0.665228
| 0
| 0.039939
| 0.24491
| 10,412
| 222
| 244
| 46.900901
| 0.726405
| 0.067326
| 0
| 0.545455
| 0
| 0
| 0.170343
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032468
| false
| 0
| 0.038961
| 0.006494
| 0.103896
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2911616b731165fcdfcdc35a01337dbcf1f0bef8
| 72,048
|
py
|
Python
|
google/ads/google_ads/v5/proto/resources/ad_group_criterion_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v5/proto/resources/ad_group_criterion_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v5/proto/resources/ad_group_criterion_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v5/proto/resources/ad_group_criterion.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v5.proto.common import criteria_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2
from google.ads.google_ads.v5.proto.common import custom_parameter_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_custom__parameter__pb2
from google.ads.google_ads.v5.proto.enums import ad_group_criterion_approval_status_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_ad__group__criterion__approval__status__pb2
from google.ads.google_ads.v5.proto.enums import ad_group_criterion_status_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_ad__group__criterion__status__pb2
from google.ads.google_ads.v5.proto.enums import bidding_source_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_bidding__source__pb2
from google.ads.google_ads.v5.proto.enums import criterion_system_serving_status_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__system__serving__status__pb2
from google.ads.google_ads.v5.proto.enums import criterion_type_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__type__pb2
from google.ads.google_ads.v5.proto.enums import quality_score_bucket_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_quality__score__bucket__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
# File-level descriptor for ad_group_criterion.proto.
# Registers the proto file (package google.ads.googleads.v5.resources,
# syntax proto3) and its full serialized FileDescriptorProto bytes with the
# default descriptor pool via _sym_db.  Generated by protoc -- DO NOT EDIT
# by hand: the serialized_start/serialized_end offsets used by the message
# descriptors below index directly into serialized_pb.
# NOTE(review): the serialized_pb bytes literal appears split across physical
# lines here; in the generator's output it is a single line -- confirm against
# a fresh protoc run before relying on this copy.
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v5/proto/resources/ad_group_criterion.proto',
package='google.ads.googleads.v5.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v5.resourcesB\025AdGroupCriterionProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v5/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V5.Resources\312\002!Google\\Ads\\GoogleAds\\V5\\Resources\352\002%Google::Ads::GoogleAds::V5::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n@google/ads/googleads_v5/proto/resources/ad_group_criterion.proto\x12!google.ads.googleads.v5.resources\x1a\x33google/ads/googleads_v5/proto/common/criteria.proto\x1a;google/ads/googleads_v5/proto/common/custom_parameter.proto\x1aLgoogle/ads/googleads_v5/proto/enums/ad_group_criterion_approval_status.proto\x1a\x43google/ads/googleads_v5/proto/enums/ad_group_criterion_status.proto\x1a\x38google/ads/googleads_v5/proto/enums/bidding_source.proto\x1aIgoogle/ads/googleads_v5/proto/enums/criterion_system_serving_status.proto\x1a\x38google/ads/googleads_v5/proto/enums/criterion_type.proto\x1a>google/ads/googleads_v5/proto/enums/quality_score_bucket.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xab#\n\x10\x41\x64GroupCriterion\x12H\n\rresource_name\x18\x01 \x01(\tB1\xe0\x41\x05\xfa\x41+\n)googleads.googleapis.com/AdGroupCriterion\x12\x1e\n\x0c\x63riterion_id\x18\x38 \x01(\x03\x42\x03\xe0\x41\x03H\x01\x88\x01\x01\x12`\n\x06status\x18\x03 \x01(\x0e\x32P.google.ads.googleads.v5.enums.AdGroupCriterionStatusEnum.AdGroupCriterionStatus\x12Z\n\x0cquality_info\x18\x04 \x01(\x0b\x32?.google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfoB\x03\xe0\x41\x03\x12?\n\x08\x61\x64_group\x18\x39 \x01(\tB(\xe0\x41\x05\xfa\x41\"\n googleads.googleapis.com/AdGroupH\x02\x88\x01\x01\x12Q\n\x04type\x18\x19 \x01(\x0e\x32>.google.ads.googleads.v5.enums.CriterionTypeEnum.CriterionTypeB\x03\xe0\x41\x03\x12\x1a\n\x08negative\x18: \x01(\x08\x42\x03\xe0\x41\x05H\x03\x88\x01\x01\x12\x80\x01\n\x15system_serving_status\x18\x34 \x01(\x0e\x32\\.google.ads.googleads.v5.enums.CriterionSystemServingStatusEnum.CriterionSystemServingStatusB\x03\xe0\x41\x03\x12~\n\x0f\x61pproval_status\x18\x35 \x01(\x0e\x32`.google.ads.googleads.v5.enums.AdGroupCriterionApprovalStatusEnum.AdGroupCriterionApprovalStatusB\x03\xe0\x41\x03\x12 \n\x13\x64isapproval_reasons\x18; \x03(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x62id_modifier\x18= 
\x01(\x01H\x04\x88\x01\x01\x12\x1b\n\x0e\x63pc_bid_micros\x18> \x01(\x03H\x05\x88\x01\x01\x12\x1b\n\x0e\x63pm_bid_micros\x18? \x01(\x03H\x06\x88\x01\x01\x12\x1b\n\x0e\x63pv_bid_micros\x18@ \x01(\x03H\x07\x88\x01\x01\x12#\n\x16percent_cpc_bid_micros\x18\x41 \x01(\x03H\x08\x88\x01\x01\x12*\n\x18\x65\x66\x66\x65\x63tive_cpc_bid_micros\x18\x42 \x01(\x03\x42\x03\xe0\x41\x03H\t\x88\x01\x01\x12*\n\x18\x65\x66\x66\x65\x63tive_cpm_bid_micros\x18\x43 \x01(\x03\x42\x03\xe0\x41\x03H\n\x88\x01\x01\x12*\n\x18\x65\x66\x66\x65\x63tive_cpv_bid_micros\x18\x44 \x01(\x03\x42\x03\xe0\x41\x03H\x0b\x88\x01\x01\x12\x32\n effective_percent_cpc_bid_micros\x18\x45 \x01(\x03\x42\x03\xe0\x41\x03H\x0c\x88\x01\x01\x12\x65\n\x18\x65\x66\x66\x65\x63tive_cpc_bid_source\x18\x15 \x01(\x0e\x32>.google.ads.googleads.v5.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\x65\n\x18\x65\x66\x66\x65\x63tive_cpm_bid_source\x18\x16 \x01(\x0e\x32>.google.ads.googleads.v5.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\x65\n\x18\x65\x66\x66\x65\x63tive_cpv_bid_source\x18\x17 \x01(\x0e\x32>.google.ads.googleads.v5.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12m\n effective_percent_cpc_bid_source\x18# \x01(\x0e\x32>.google.ads.googleads.v5.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\x66\n\x12position_estimates\x18\n \x01(\x0b\x32\x45.google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimatesB\x03\xe0\x41\x03\x12\x12\n\nfinal_urls\x18\x46 \x03(\t\x12\x19\n\x11\x66inal_mobile_urls\x18G \x03(\t\x12\x1d\n\x10\x66inal_url_suffix\x18H \x01(\tH\r\x88\x01\x01\x12\"\n\x15tracking_url_template\x18I \x01(\tH\x0e\x88\x01\x01\x12N\n\x15url_custom_parameters\x18\x0e \x03(\x0b\x32/.google.ads.googleads.v5.common.CustomParameter\x12\x43\n\x07keyword\x18\x1b \x01(\x0b\x32+.google.ads.googleads.v5.common.KeywordInfoB\x03\xe0\x41\x05H\x00\x12G\n\tplacement\x18\x1c 
\x01(\x0b\x32-.google.ads.googleads.v5.common.PlacementInfoB\x03\xe0\x41\x05H\x00\x12Y\n\x13mobile_app_category\x18\x1d \x01(\x0b\x32\x35.google.ads.googleads.v5.common.MobileAppCategoryInfoB\x03\xe0\x41\x05H\x00\x12X\n\x12mobile_application\x18\x1e \x01(\x0b\x32\x35.google.ads.googleads.v5.common.MobileApplicationInfoB\x03\xe0\x41\x05H\x00\x12N\n\rlisting_group\x18 \x01(\x0b\x32\x30.google.ads.googleads.v5.common.ListingGroupInfoB\x03\xe0\x41\x05H\x00\x12\x46\n\tage_range\x18$ \x01(\x0b\x32,.google.ads.googleads.v5.common.AgeRangeInfoB\x03\xe0\x41\x05H\x00\x12\x41\n\x06gender\x18% \x01(\x0b\x32*.google.ads.googleads.v5.common.GenderInfoB\x03\xe0\x41\x05H\x00\x12L\n\x0cincome_range\x18& \x01(\x0b\x32/.google.ads.googleads.v5.common.IncomeRangeInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0fparental_status\x18\' \x01(\x0b\x32\x32.google.ads.googleads.v5.common.ParentalStatusInfoB\x03\xe0\x41\x05H\x00\x12\x46\n\tuser_list\x18* \x01(\x0b\x32,.google.ads.googleads.v5.common.UserListInfoB\x03\xe0\x41\x05H\x00\x12N\n\ryoutube_video\x18( \x01(\x0b\x32\x30.google.ads.googleads.v5.common.YouTubeVideoInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0fyoutube_channel\x18) \x01(\x0b\x32\x32.google.ads.googleads.v5.common.YouTubeChannelInfoB\x03\xe0\x41\x05H\x00\x12?\n\x05topic\x18+ \x01(\x0b\x32).google.ads.googleads.v5.common.TopicInfoB\x03\xe0\x41\x05H\x00\x12N\n\ruser_interest\x18- \x01(\x0b\x32\x30.google.ads.googleads.v5.common.UserInterestInfoB\x03\xe0\x41\x05H\x00\x12\x43\n\x07webpage\x18. 
\x01(\x0b\x32+.google.ads.googleads.v5.common.WebpageInfoB\x03\xe0\x41\x05H\x00\x12U\n\x11\x61pp_payment_model\x18/ \x01(\x0b\x32\x33.google.ads.googleads.v5.common.AppPaymentModelInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0f\x63ustom_affinity\x18\x30 \x01(\x0b\x32\x32.google.ads.googleads.v5.common.CustomAffinityInfoB\x03\xe0\x41\x05H\x00\x12N\n\rcustom_intent\x18\x31 \x01(\x0b\x32\x30.google.ads.googleads.v5.common.CustomIntentInfoB\x03\xe0\x41\x05H\x00\x1a\x8d\x03\n\x0bQualityInfo\x12\x1f\n\rquality_score\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12m\n\x16\x63reative_quality_score\x18\x02 \x01(\x0e\x32H.google.ads.googleads.v5.enums.QualityScoreBucketEnum.QualityScoreBucketB\x03\xe0\x41\x03\x12o\n\x18post_click_quality_score\x18\x03 \x01(\x0e\x32H.google.ads.googleads.v5.enums.QualityScoreBucketEnum.QualityScoreBucketB\x03\xe0\x41\x03\x12k\n\x14search_predicted_ctr\x18\x04 \x01(\x0e\x32H.google.ads.googleads.v5.enums.QualityScoreBucketEnum.QualityScoreBucketB\x03\xe0\x41\x03\x42\x10\n\x0e_quality_score\x1a\xbc\x03\n\x11PositionEstimates\x12\'\n\x15\x66irst_page_cpc_micros\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12+\n\x19\x66irst_position_cpc_micros\x18\x07 \x01(\x03\x42\x03\xe0\x41\x03H\x01\x88\x01\x01\x12(\n\x16top_of_page_cpc_micros\x18\x08 \x01(\x03\x42\x03\xe0\x41\x03H\x02\x88\x01\x01\x12<\n*estimated_add_clicks_at_first_position_cpc\x18\t \x01(\x03\x42\x03\xe0\x41\x03H\x03\x88\x01\x01\x12:\n(estimated_add_cost_at_first_position_cpc\x18\n 
\x01(\x03\x42\x03\xe0\x41\x03H\x04\x88\x01\x01\x42\x18\n\x16_first_page_cpc_microsB\x1c\n\x1a_first_position_cpc_microsB\x19\n\x17_top_of_page_cpc_microsB-\n+_estimated_add_clicks_at_first_position_cpcB+\n)_estimated_add_cost_at_first_position_cpc:i\xea\x41\x66\n)googleads.googleapis.com/AdGroupCriterion\x12\x39\x63ustomers/{customer}/adGroupCriteria/{ad_group_criterion}B\x0b\n\tcriterionB\x0f\n\r_criterion_idB\x0b\n\t_ad_groupB\x0b\n\t_negativeB\x0f\n\r_bid_modifierB\x11\n\x0f_cpc_bid_microsB\x11\n\x0f_cpm_bid_microsB\x11\n\x0f_cpv_bid_microsB\x19\n\x17_percent_cpc_bid_microsB\x1b\n\x19_effective_cpc_bid_microsB\x1b\n\x19_effective_cpm_bid_microsB\x1b\n\x19_effective_cpv_bid_microsB#\n!_effective_percent_cpc_bid_microsB\x13\n\x11_final_url_suffixB\x18\n\x16_tracking_url_templateB\x82\x02\n%com.google.ads.googleads.v5.resourcesB\x15\x41\x64GroupCriterionProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v5/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V5.Resources\xca\x02!Google\\Ads\\GoogleAds\\V5\\Resources\xea\x02%Google::Ads::GoogleAds::V5::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_custom__parameter__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_ad__group__criterion__approval__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_ad__group__criterion__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_bidding__source__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__system__serving__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_quality__score__bucket__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
# Descriptor for the nested message
# google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo.
# Generated by protoc -- DO NOT EDIT by hand.  Every field carries
# serialized_options b'\340A\003', the encoded google.api.field_behavior
# OUTPUT_ONLY annotation imported at the top of this file.
# serialized_start/serialized_end index into DESCRIPTOR's serialized_pb.
_ADGROUPCRITERION_QUALITYINFO = _descriptor.Descriptor(
name='QualityInfo',
full_name='google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
# quality_score: field number 5, type=5/cpp_type=1 (int32 per the protobuf
# descriptor type enum); member of the `_quality_score` oneof declared below,
# presumably the synthetic oneof backing proto3 explicit presence -- confirm
# against the .proto.
_descriptor.FieldDescriptor(
name='quality_score', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo.quality_score', index=0,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# creative_quality_score: field number 2, type=14 (enum); the enum_type is
# resolved later by the runtime, not set here.
_descriptor.FieldDescriptor(
name='creative_quality_score', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo.creative_quality_score', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# post_click_quality_score: field number 3, type=14 (enum).
_descriptor.FieldDescriptor(
name='post_click_quality_score', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo.post_click_quality_score', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# search_predicted_ctr: field number 4, type=14 (enum).
_descriptor.FieldDescriptor(
name='search_predicted_ctr', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo.search_predicted_ctr', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
# Oneof `_quality_score` (leading underscore naming matches the pattern
# protoc uses for proto3-optional synthetic oneofs); its member fields are
# filled in by the runtime after construction.
_descriptor.OneofDescriptor(
name='_quality_score', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo._quality_score',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
# Byte offsets of this message definition within DESCRIPTOR.serialized_pb.
serialized_start=3954,
serialized_end=4351,
)
# Descriptor for the nested message
# google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates.
# Generated by protoc -- DO NOT EDIT by hand.  All five fields are
# type=3/cpp_type=2 (int64 per the protobuf descriptor type enum) and carry
# serialized_options b'\340A\003', the encoded google.api.field_behavior
# OUTPUT_ONLY annotation.  Each field is paired with a like-named
# `_<field>` oneof below -- presumably the synthetic oneofs backing proto3
# explicit presence; confirm against the .proto.
_ADGROUPCRITERION_POSITIONESTIMATES = _descriptor.Descriptor(
name='PositionEstimates',
full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
# first_page_cpc_micros: field number 6.
_descriptor.FieldDescriptor(
name='first_page_cpc_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates.first_page_cpc_micros', index=0,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# first_position_cpc_micros: field number 7.
_descriptor.FieldDescriptor(
name='first_position_cpc_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates.first_position_cpc_micros', index=1,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# top_of_page_cpc_micros: field number 8.
_descriptor.FieldDescriptor(
name='top_of_page_cpc_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates.top_of_page_cpc_micros', index=2,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# estimated_add_clicks_at_first_position_cpc: field number 9.
_descriptor.FieldDescriptor(
name='estimated_add_clicks_at_first_position_cpc', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates.estimated_add_clicks_at_first_position_cpc', index=3,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
# estimated_add_cost_at_first_position_cpc: field number 10.
_descriptor.FieldDescriptor(
name='estimated_add_cost_at_first_position_cpc', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates.estimated_add_cost_at_first_position_cpc', index=4,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
# One oneof per field above; member lists are populated by the runtime
# after construction.
oneofs=[
_descriptor.OneofDescriptor(
name='_first_page_cpc_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates._first_page_cpc_micros',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_first_position_cpc_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates._first_position_cpc_micros',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_top_of_page_cpc_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates._top_of_page_cpc_micros',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_estimated_add_clicks_at_first_position_cpc', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates._estimated_add_clicks_at_first_position_cpc',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_estimated_add_cost_at_first_position_cpc', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates._estimated_add_cost_at_first_position_cpc',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
# Byte offsets of this message definition within DESCRIPTOR.serialized_pb.
serialized_start=4354,
serialized_end=4798,
)
_ADGROUPCRITERION = _descriptor.Descriptor(
name='AdGroupCriterion',
full_name='google.ads.googleads.v5.resources.AdGroupCriterion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A+\n)googleads.googleapis.com/AdGroupCriterion', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='criterion_id', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.criterion_id', index=1,
number=56, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.status', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quality_info', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.quality_info', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ad_group', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.ad_group', index=4,
number=57, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A\"\n googleads.googleapis.com/AdGroup', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.type', index=5,
number=25, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='negative', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.negative', index=6,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='system_serving_status', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.system_serving_status', index=7,
number=52, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='approval_status', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.approval_status', index=8,
number=53, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='disapproval_reasons', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.disapproval_reasons', index=9,
number=59, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bid_modifier', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.bid_modifier', index=10,
number=61, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.cpc_bid_micros', index=11,
number=62, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpm_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.cpm_bid_micros', index=12,
number=63, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpv_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.cpv_bid_micros', index=13,
number=64, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='percent_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.percent_cpc_bid_micros', index=14,
number=65, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_cpc_bid_micros', index=15,
number=66, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpm_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_cpm_bid_micros', index=16,
number=67, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpv_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_cpv_bid_micros', index=17,
number=68, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_percent_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_percent_cpc_bid_micros', index=18,
number=69, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpc_bid_source', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_cpc_bid_source', index=19,
number=21, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpm_bid_source', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_cpm_bid_source', index=20,
number=22, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpv_bid_source', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_cpv_bid_source', index=21,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_percent_cpc_bid_source', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.effective_percent_cpc_bid_source', index=22,
number=35, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position_estimates', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.position_estimates', index=23,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_urls', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.final_urls', index=24,
number=70, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_mobile_urls', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.final_mobile_urls', index=25,
number=71, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_url_suffix', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.final_url_suffix', index=26,
number=72, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tracking_url_template', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.tracking_url_template', index=27,
number=73, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url_custom_parameters', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.url_custom_parameters', index=28,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keyword', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.keyword', index=29,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='placement', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.placement', index=30,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile_app_category', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.mobile_app_category', index=31,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile_application', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.mobile_application', index=32,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='listing_group', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.listing_group', index=33,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='age_range', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.age_range', index=34,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gender', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.gender', index=35,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='income_range', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.income_range', index=36,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parental_status', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.parental_status', index=37,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_list', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.user_list', index=38,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='youtube_video', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.youtube_video', index=39,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='youtube_channel', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.youtube_channel', index=40,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='topic', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.topic', index=41,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_interest', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.user_interest', index=42,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webpage', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.webpage', index=43,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='app_payment_model', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.app_payment_model', index=44,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='custom_affinity', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.custom_affinity', index=45,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='custom_intent', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.custom_intent', index=46,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_ADGROUPCRITERION_QUALITYINFO, _ADGROUPCRITERION_POSITIONESTIMATES, ],
enum_types=[
],
serialized_options=b'\352Af\n)googleads.googleapis.com/AdGroupCriterion\0229customers/{customer}/adGroupCriteria/{ad_group_criterion}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='criterion', full_name='google.ads.googleads.v5.resources.AdGroupCriterion.criterion',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_criterion_id', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._criterion_id',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_ad_group', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._ad_group',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_negative', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._negative',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_bid_modifier', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._bid_modifier',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._cpc_bid_micros',
index=5, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpm_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._cpm_bid_micros',
index=6, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpv_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._cpv_bid_micros',
index=7, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_percent_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._percent_cpc_bid_micros',
index=8, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._effective_cpc_bid_micros',
index=9, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_cpm_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._effective_cpm_bid_micros',
index=10, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_cpv_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._effective_cpv_bid_micros',
index=11, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_percent_cpc_bid_micros', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._effective_percent_cpc_bid_micros',
index=12, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_final_url_suffix', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._final_url_suffix',
index=13, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_tracking_url_template', full_name='google.ads.googleads.v5.resources.AdGroupCriterion._tracking_url_template',
index=14, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=710,
serialized_end=5233,
)
# ----------------------------------------------------------------------------
# Descriptor cross-linking for the AdGroupCriterion message.
#
# The Descriptor objects constructed above are built with `enum_type`,
# `message_type`, `containing_type` and oneof membership left as None; the
# statements below resolve those references now that every descriptor exists,
# then register the file descriptor with the symbol database.
# ----------------------------------------------------------------------------

def _wire_oneof(message_descriptor, oneof_name, field_name):
    # Attach `field_name` to `oneof_name` on the given message descriptor and
    # record the field's back-reference to its containing oneof.
    oneof = message_descriptor.oneofs_by_name[oneof_name]
    field = message_descriptor.fields_by_name[field_name]
    oneof.fields.append(field)
    field.containing_oneof = oneof

# --- AdGroupCriterion.QualityInfo -------------------------------------------
# The three comparative score fields all use the QualityScoreBucket enum.
_quality_bucket = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_quality__score__bucket__pb2._QUALITYSCOREBUCKETENUM_QUALITYSCOREBUCKET
for _fname in ('creative_quality_score', 'post_click_quality_score',
               'search_predicted_ctr'):
    _ADGROUPCRITERION_QUALITYINFO.fields_by_name[_fname].enum_type = _quality_bucket
_ADGROUPCRITERION_QUALITYINFO.containing_type = _ADGROUPCRITERION
_wire_oneof(_ADGROUPCRITERION_QUALITYINFO, '_quality_score', 'quality_score')

# --- AdGroupCriterion.PositionEstimates -------------------------------------
_ADGROUPCRITERION_POSITIONESTIMATES.containing_type = _ADGROUPCRITERION
# Each estimate field lives in a synthetic single-field oneof named
# "_<field_name>" (the proto3 optional-scalar pattern; file syntax is proto3).
for _fname in ('first_page_cpc_micros',
               'first_position_cpc_micros',
               'top_of_page_cpc_micros',
               'estimated_add_clicks_at_first_position_cpc',
               'estimated_add_cost_at_first_position_cpc'):
    _wire_oneof(_ADGROUPCRITERION_POSITIONESTIMATES, '_' + _fname, _fname)

# --- AdGroupCriterion -------------------------------------------------------
_fields = _ADGROUPCRITERION.fields_by_name
_criteria = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_criteria__pb2

_fields['status'].enum_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_ad__group__criterion__status__pb2._ADGROUPCRITERIONSTATUSENUM_ADGROUPCRITERIONSTATUS
_fields['quality_info'].message_type = _ADGROUPCRITERION_QUALITYINFO
_fields['type'].enum_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__type__pb2._CRITERIONTYPEENUM_CRITERIONTYPE
_fields['system_serving_status'].enum_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_criterion__system__serving__status__pb2._CRITERIONSYSTEMSERVINGSTATUSENUM_CRITERIONSYSTEMSERVINGSTATUS
_fields['approval_status'].enum_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_ad__group__criterion__approval__status__pb2._ADGROUPCRITERIONAPPROVALSTATUSENUM_ADGROUPCRITERIONAPPROVALSTATUS

# The four effective-bid-source fields share the BiddingSource enum.
_bidding_source = google_dot_ads_dot_googleads__v5_dot_proto_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
for _fname in ('effective_cpc_bid_source', 'effective_cpm_bid_source',
               'effective_cpv_bid_source', 'effective_percent_cpc_bid_source'):
    _fields[_fname].enum_type = _bidding_source

_fields['position_estimates'].message_type = _ADGROUPCRITERION_POSITIONESTIMATES
_fields['url_custom_parameters'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_common_dot_custom__parameter__pb2._CUSTOMPARAMETER

# The "criterion" oneof: each member is one of the shared criteria messages
# from common/criteria.proto.  Tuple order matches the original append order,
# which fixes the field order recorded on the oneof descriptor.
for _fname, _msg in (
        ('keyword', _criteria._KEYWORDINFO),
        ('placement', _criteria._PLACEMENTINFO),
        ('mobile_app_category', _criteria._MOBILEAPPCATEGORYINFO),
        ('mobile_application', _criteria._MOBILEAPPLICATIONINFO),
        ('listing_group', _criteria._LISTINGGROUPINFO),
        ('age_range', _criteria._AGERANGEINFO),
        ('gender', _criteria._GENDERINFO),
        ('income_range', _criteria._INCOMERANGEINFO),
        ('parental_status', _criteria._PARENTALSTATUSINFO),
        ('user_list', _criteria._USERLISTINFO),
        ('youtube_video', _criteria._YOUTUBEVIDEOINFO),
        ('youtube_channel', _criteria._YOUTUBECHANNELINFO),
        ('topic', _criteria._TOPICINFO),
        ('user_interest', _criteria._USERINTERESTINFO),
        ('webpage', _criteria._WEBPAGEINFO),
        ('app_payment_model', _criteria._APPPAYMENTMODELINFO),
        ('custom_affinity', _criteria._CUSTOMAFFINITYINFO),
        ('custom_intent', _criteria._CUSTOMINTENTINFO),
):
    _fields[_fname].message_type = _msg
    _wire_oneof(_ADGROUPCRITERION, 'criterion', _fname)

# Optional scalar fields on AdGroupCriterion itself: synthetic "_<name>"
# single-field oneofs, as above.
for _fname in ('criterion_id', 'ad_group', 'negative', 'bid_modifier',
               'cpc_bid_micros', 'cpm_bid_micros', 'cpv_bid_micros',
               'percent_cpc_bid_micros', 'effective_cpc_bid_micros',
               'effective_cpm_bid_micros', 'effective_cpv_bid_micros',
               'effective_percent_cpc_bid_micros', 'final_url_suffix',
               'tracking_url_template'):
    _wire_oneof(_ADGROUPCRITERION, '_' + _fname, _fname)

DESCRIPTOR.message_types_by_name['AdGroupCriterion'] = _ADGROUPCRITERION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupCriterion = _reflection.GeneratedProtocolMessageType('AdGroupCriterion', (_message.Message,), {
'QualityInfo' : _reflection.GeneratedProtocolMessageType('QualityInfo', (_message.Message,), {
'DESCRIPTOR' : _ADGROUPCRITERION_QUALITYINFO,
'__module__' : 'google.ads.googleads_v5.proto.resources.ad_group_criterion_pb2'
,
'__doc__': """A container for ad group criterion quality information.
Attributes:
quality_score:
Output only. The quality score. This field may not be
populated if Google does not have enough information to
determine a value.
creative_quality_score:
Output only. The performance of the ad compared to other
advertisers.
post_click_quality_score:
Output only. The quality score of the landing page.
search_predicted_ctr:
Output only. The click-through rate compared to that of other
advertisers.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.resources.AdGroupCriterion.QualityInfo)
})
,
'PositionEstimates' : _reflection.GeneratedProtocolMessageType('PositionEstimates', (_message.Message,), {
'DESCRIPTOR' : _ADGROUPCRITERION_POSITIONESTIMATES,
'__module__' : 'google.ads.googleads_v5.proto.resources.ad_group_criterion_pb2'
,
'__doc__': """Estimates for criterion bids at various positions.
Attributes:
first_page_cpc_micros:
Output only. The estimate of the CPC bid required for ad to be
shown on first page of search results.
first_position_cpc_micros:
Output only. The estimate of the CPC bid required for ad to be
displayed in first position, at the top of the first page of
search results.
top_of_page_cpc_micros:
Output only. The estimate of the CPC bid required for ad to be
displayed at the top of the first page of search results.
estimated_add_clicks_at_first_position_cpc:
Output only. Estimate of how many clicks per week you might
get by changing your keyword bid to the value in
first\_position\_cpc\_micros.
estimated_add_cost_at_first_position_cpc:
Output only. Estimate of how your cost per week might change
when changing your keyword bid to the value in
first\_position\_cpc\_micros.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.resources.AdGroupCriterion.PositionEstimates)
})
,
'DESCRIPTOR' : _ADGROUPCRITERION,
'__module__' : 'google.ads.googleads_v5.proto.resources.ad_group_criterion_pb2'
,
'__doc__': """An ad group criterion.
Attributes:
resource_name:
Immutable. The resource name of the ad group criterion. Ad
group criterion resource names have the form: ``customers/{cu
stomer_id}/adGroupCriteria/{ad_group_id}~{criterion_id}``
criterion_id:
Output only. The ID of the criterion. This field is ignored
for mutates.
status:
The status of the criterion. This is the status of the ad
group criterion entity, set by the client. Note: UI reports
may incorporate additional information that affects whether a
criterion is eligible to run. In some cases a criterion that's
REMOVED in the API can still show as enabled in the UI. For
example, campaigns by default show to users of all age ranges
unless excluded. The UI will show each age range as "enabled",
since they're eligible to see the ads; but
AdGroupCriterion.status will show "removed", since no positive
criterion was added.
quality_info:
Output only. Information regarding the quality of the
criterion.
ad_group:
Immutable. The ad group to which the criterion belongs.
type:
Output only. The type of the criterion.
negative:
Immutable. Whether to target (``false``) or exclude (``true``)
the criterion. This field is immutable. To switch a criterion
from positive to negative, remove then re-add it.
system_serving_status:
Output only. Serving status of the criterion.
approval_status:
Output only. Approval status of the criterion.
disapproval_reasons:
Output only. List of disapproval reasons of the criterion.
The different reasons for disapproving a criterion can be
found here:
https://support.google.com/adspolicy/answer/6008942 This
field is read-only.
bid_modifier:
The modifier for the bid when the criterion matches. The
modifier must be in the range: 0.1 - 10.0. Most targetable
criteria types support modifiers.
cpc_bid_micros:
The CPC (cost-per-click) bid.
cpm_bid_micros:
The CPM (cost-per-thousand viewable impressions) bid.
cpv_bid_micros:
The CPV (cost-per-view) bid.
percent_cpc_bid_micros:
The CPC bid amount, expressed as a fraction of the advertised
price for some good or service. The valid range for the
fraction is [0,1) and the value stored here is 1,000,000 \*
[fraction].
effective_cpc_bid_micros:
Output only. The effective CPC (cost-per-click) bid.
effective_cpm_bid_micros:
Output only. The effective CPM (cost-per-thousand viewable
impressions) bid.
effective_cpv_bid_micros:
Output only. The effective CPV (cost-per-view) bid.
effective_percent_cpc_bid_micros:
Output only. The effective Percent CPC bid amount.
effective_cpc_bid_source:
Output only. Source of the effective CPC bid.
effective_cpm_bid_source:
Output only. Source of the effective CPM bid.
effective_cpv_bid_source:
Output only. Source of the effective CPV bid.
effective_percent_cpc_bid_source:
Output only. Source of the effective Percent CPC bid.
position_estimates:
Output only. Estimates for criterion bids at various
positions.
final_urls:
The list of possible final URLs after all cross-domain
redirects for the ad.
final_mobile_urls:
The list of possible final mobile URLs after all cross-domain
redirects.
final_url_suffix:
URL template for appending params to final URL.
tracking_url_template:
The URL template for constructing a tracking URL.
url_custom_parameters:
The list of mappings used to substitute custom parameter tags
in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
criterion:
The ad group criterion. Exactly one must be set.
keyword:
Immutable. Keyword.
placement:
Immutable. Placement.
mobile_app_category:
Immutable. Mobile app category.
mobile_application:
Immutable. Mobile application.
listing_group:
Immutable. Listing group.
age_range:
Immutable. Age range.
gender:
Immutable. Gender.
income_range:
Immutable. Income range.
parental_status:
Immutable. Parental status.
user_list:
Immutable. User List.
youtube_video:
Immutable. YouTube Video.
youtube_channel:
Immutable. YouTube Channel.
topic:
Immutable. Topic.
user_interest:
Immutable. User Interest.
webpage:
Immutable. Webpage
app_payment_model:
Immutable. App Payment Model.
custom_affinity:
Immutable. Custom Affinity.
custom_intent:
Immutable. Custom Intent.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.resources.AdGroupCriterion)
})
_sym_db.RegisterMessage(AdGroupCriterion)
_sym_db.RegisterMessage(AdGroupCriterion.QualityInfo)
_sym_db.RegisterMessage(AdGroupCriterion.PositionEstimates)
DESCRIPTOR._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['quality_score']._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['creative_quality_score']._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['post_click_quality_score']._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['search_predicted_ctr']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_page_cpc_micros']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_position_cpc_micros']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['top_of_page_cpc_micros']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_clicks_at_first_position_cpc']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_cost_at_first_position_cpc']._options = None
_ADGROUPCRITERION.fields_by_name['resource_name']._options = None
_ADGROUPCRITERION.fields_by_name['criterion_id']._options = None
_ADGROUPCRITERION.fields_by_name['quality_info']._options = None
_ADGROUPCRITERION.fields_by_name['ad_group']._options = None
_ADGROUPCRITERION.fields_by_name['type']._options = None
_ADGROUPCRITERION.fields_by_name['negative']._options = None
_ADGROUPCRITERION.fields_by_name['system_serving_status']._options = None
_ADGROUPCRITERION.fields_by_name['approval_status']._options = None
_ADGROUPCRITERION.fields_by_name['disapproval_reasons']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpc_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpm_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpv_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpc_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpm_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpv_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['position_estimates']._options = None
_ADGROUPCRITERION.fields_by_name['keyword']._options = None
_ADGROUPCRITERION.fields_by_name['placement']._options = None
_ADGROUPCRITERION.fields_by_name['mobile_app_category']._options = None
_ADGROUPCRITERION.fields_by_name['mobile_application']._options = None
_ADGROUPCRITERION.fields_by_name['listing_group']._options = None
_ADGROUPCRITERION.fields_by_name['age_range']._options = None
_ADGROUPCRITERION.fields_by_name['gender']._options = None
_ADGROUPCRITERION.fields_by_name['income_range']._options = None
_ADGROUPCRITERION.fields_by_name['parental_status']._options = None
_ADGROUPCRITERION.fields_by_name['user_list']._options = None
_ADGROUPCRITERION.fields_by_name['youtube_video']._options = None
_ADGROUPCRITERION.fields_by_name['youtube_channel']._options = None
_ADGROUPCRITERION.fields_by_name['topic']._options = None
_ADGROUPCRITERION.fields_by_name['user_interest']._options = None
_ADGROUPCRITERION.fields_by_name['webpage']._options = None
_ADGROUPCRITERION.fields_by_name['app_payment_model']._options = None
_ADGROUPCRITERION.fields_by_name['custom_affinity']._options = None
_ADGROUPCRITERION.fields_by_name['custom_intent']._options = None
_ADGROUPCRITERION._options = None
# @@protoc_insertion_point(module_scope)
| 71.832502
| 7,972
| 0.798426
| 9,605
| 72,048
| 5.567829
| 0.065487
| 0.025805
| 0.034331
| 0.049739
| 0.830943
| 0.798239
| 0.7436
| 0.684811
| 0.651676
| 0.583201
| 0
| 0.038577
| 0.100155
| 72,048
| 1,002
| 7,973
| 71.904192
| 0.786309
| 0.007231
| 0
| 0.45221
| 1
| 0.004111
| 0.35468
| 0.223972
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015416
| 0
| 0.015416
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2920ca4dd0173ed4466604445038b0eb072ca1d0
| 74
|
py
|
Python
|
pyretina/mc/__init__.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | 2
|
2016-05-28T15:59:47.000Z
|
2018-07-30T21:05:18.000Z
|
pyretina/mc/__init__.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | null | null | null |
pyretina/mc/__init__.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | null | null | null |
from pseudo_velo_mc import monte_carlo, mc_stream
from config import Event
| 37
| 49
| 0.878378
| 13
| 74
| 4.692308
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 50
| 37
| 0.924242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4625660b6d5d04c16c34782448fe3d79c97a7f98
| 112
|
py
|
Python
|
detection/__main__.py
|
janaSunrise/opencv2-face-detection-python
|
319f158f5f8eeadea597a447fee065d3926eefca
|
[
"MIT"
] | 3
|
2021-05-04T17:50:02.000Z
|
2021-05-10T14:38:34.000Z
|
detection/__main__.py
|
janaSunrise/opencv2-face-detection-python
|
319f158f5f8eeadea597a447fee065d3926eefca
|
[
"MIT"
] | null | null | null |
detection/__main__.py
|
janaSunrise/opencv2-face-detection-python
|
319f158f5f8eeadea597a447fee065d3926eefca
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
print("You cannot run this module directly. Try running one of the submodules.")
| 37.333333
| 84
| 0.723214
| 16
| 112
| 4.5625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 112
| 2
| 85
| 56
| 0.793478
| 0
| 0
| 0
| 0
| 0
| 0.705357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
464396c60b52257d002eea4cf825e7bdb78ee009
| 99
|
py
|
Python
|
main.py
|
TheRavehorn/KeyLogger
|
19faa15f929417942f6aaac1a28ada42ae3de384
|
[
"MIT"
] | null | null | null |
main.py
|
TheRavehorn/KeyLogger
|
19faa15f929417942f6aaac1a28ada42ae3de384
|
[
"MIT"
] | null | null | null |
main.py
|
TheRavehorn/KeyLogger
|
19faa15f929417942f6aaac1a28ada42ae3de384
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import keylogger
my_keylogger = keylogger.KeyLogger()
my_keylogger.start()
| 16.5
| 36
| 0.787879
| 13
| 99
| 5.846154
| 0.615385
| 0.289474
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.090909
| 99
| 5
| 37
| 19.8
| 0.833333
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4649cd8b599afe84623169e1d007d61a6131e097
| 39
|
py
|
Python
|
setupegg.py
|
joshua-sterner/stockwell_transform
|
6b78613cc3b2f6e0ac872813f41e57e949327c8c
|
[
"MIT"
] | 25
|
2017-03-23T19:21:00.000Z
|
2022-03-03T14:49:42.000Z
|
setupegg.py
|
joshua-sterner/stockwell_transform
|
6b78613cc3b2f6e0ac872813f41e57e949327c8c
|
[
"MIT"
] | 3
|
2017-12-18T16:49:06.000Z
|
2019-06-30T12:24:40.000Z
|
setupegg.py
|
joshua-sterner/stockwell_transform
|
6b78613cc3b2f6e0ac872813f41e57e949327c8c
|
[
"MIT"
] | 13
|
2016-04-25T23:17:45.000Z
|
2021-01-30T23:42:35.000Z
|
import setuptools
execfile("setup.py")
| 13
| 20
| 0.794872
| 5
| 39
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 2
| 21
| 19.5
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d3c01ed5c9aad5e69c4a89e695d078cf93edc0e2
| 19
|
py
|
Python
|
gym/version.py
|
MrJayK/gym
|
49d33fa83c6b65859d584939dc6e72f1ad36882d
|
[
"Python-2.0",
"OLDAP-2.7"
] | 9
|
2019-12-11T20:34:20.000Z
|
2021-05-23T04:35:29.000Z
|
gym/version.py
|
MrJayK/gym
|
49d33fa83c6b65859d584939dc6e72f1ad36882d
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
gym/version.py
|
MrJayK/gym
|
49d33fa83c6b65859d584939dc6e72f1ad36882d
|
[
"Python-2.0",
"OLDAP-2.7"
] | 1
|
2018-12-18T12:21:47.000Z
|
2018-12-18T12:21:47.000Z
|
VERSION = '0.11.0'
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d3dfd6b2d289d0878c6eea0c8424f0e05509ec2c
| 6,999
|
py
|
Python
|
modules/2.79/freestyle/functions.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/freestyle/functions.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/freestyle/functions.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
class ChainingTimeStampF1D:
def __init__(self):
pass
def __call__(self, inter):
pass
class Curvature2DAngleF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class Curvature2DAngleF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class CurveMaterialF0D:
pass
class CurveNatureF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class CurveNatureF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class DensityF0D:
def __init__(self, sigma=2.0):
pass
def __call__(self, it):
pass
class DensityF1D:
def __init__(self, sigma=2.0, integration_type=IntegrationType.MEAN, sampling=2.0):
pass
def __call__(self, inter):
pass
class GetCompleteViewMapDensityF1D:
def __init__(self, level, integration_type=IntegrationType.MEAN, sampling=2.0):
pass
def __call__(self, inter):
pass
class GetCurvilinearAbscissaF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetDirectionalViewMapDensityF1D:
def __init__(self, orientation, level, integration_type=IntegrationType.MEAN, sampling=2.0):
pass
def __call__(self, inter):
pass
class GetOccludeeF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetOccludeeF1D:
def __init__(self):
pass
def __call__(self, inter):
pass
class GetOccludersF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetOccludersF1D:
def __init__(self):
pass
def __call__(self, inter):
pass
class GetParameterF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetProjectedXF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetProjectedXF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class GetProjectedYF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetProjectedYF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class GetProjectedZF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetProjectedZF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class GetShapeF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetShapeF1D:
def __init__(self):
pass
def __call__(self, inter):
pass
class GetSteerableViewMapDensityF1D:
def __init__(self, level, integration_type=IntegrationType.MEAN, sampling=2.0):
pass
def __call__(self, inter):
pass
class GetViewMapGradientNormF0D:
def __init__(self, level):
pass
def __call__(self, it):
pass
class GetViewMapGradientNormF1D:
def __init__(self, level, integration_type=IntegrationType.MEAN, sampling=2.0):
pass
def __call__(self, inter):
pass
class GetXF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetXF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class GetYF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetYF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class GetZF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class GetZF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class IncrementChainingTimeStampF1D:
def __init__(self):
pass
def __call__(self, inter):
pass
class LocalAverageDepthF0D:
def __init__(self, mask_size=5.0):
pass
def __call__(self, it):
pass
class LocalAverageDepthF1D:
def __init__(self, sigma, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class MaterialF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class Normal2DF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class Normal2DF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class Orientation2DF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class Orientation3DF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class QuantitativeInvisibilityF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class QuantitativeInvisibilityF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class ReadCompleteViewMapPixelF0D:
def __init__(self, level):
pass
def __call__(self, it):
pass
class ReadMapPixelF0D:
def __init__(self, map_name, level):
pass
def __call__(self, it):
pass
class ReadSteerableViewMapPixelF0D:
def __init__(self, orientation, level):
pass
def __call__(self, it):
pass
class ShapeIdF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class TimeStampF1D:
def __init__(self):
pass
def __call__(self, inter):
pass
class VertexOrientation2DF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class VertexOrientation3DF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class ZDiscontinuityF0D:
def __init__(self):
pass
def __call__(self, it):
pass
class ZDiscontinuityF1D:
def __init__(self, integration_type=IntegrationType.MEAN):
pass
def __call__(self, inter):
pass
class pyCurvilinearLengthF0D:
pass
class pyDensityAnisotropyF0D:
pass
class pyDensityAnisotropyF1D:
pass
class pyGetInverseProjectedZF1D:
pass
class pyGetSquareInverseProjectedZF1D:
pass
class pyInverseCurvature2DAngleF0D:
pass
class pyViewMapGradientNormF0D:
pass
class pyViewMapGradientNormF1D:
pass
class pyViewMapGradientVectorF0D:
def __init__(self, level):
pass
| 12.431616
| 96
| 0.627947
| 709
| 6,999
| 5.588152
| 0.118477
| 0.136295
| 0.144372
| 0.193084
| 0.687027
| 0.670873
| 0.662039
| 0.662039
| 0.632761
| 0.632761
| 0
| 0.017515
| 0.298471
| 6,999
| 562
| 97
| 12.453737
| 0.789409
| 0
| 0
| 0.753623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.373188
| false
| 0.405797
| 0
| 0
| 0.594203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
3103ec75192ef137fa9a0eceec55142a3a9fbe40
| 60
|
py
|
Python
|
Unicorn.py
|
Hackerx0406/termux.clone
|
31d7d7320eda8db35c7e6a48cd20edd0da0e441d
|
[
"Apache-2.0"
] | null | null | null |
Unicorn.py
|
Hackerx0406/termux.clone
|
31d7d7320eda8db35c7e6a48cd20edd0da0e441d
|
[
"Apache-2.0"
] | null | null | null |
Unicorn.py
|
Hackerx0406/termux.clone
|
31d7d7320eda8db35c7e6a48cd20edd0da0e441d
|
[
"Apache-2.0"
] | null | null | null |
print("Hello User!")
print ("stay safe & have a nice day!")
| 20
| 38
| 0.65
| 10
| 60
| 3.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 60
| 2
| 39
| 30
| 0.78
| 0
| 0
| 0
| 0
| 0
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
31136bccb043f041d3a62fd66e12d2049a118cc4
| 126,549
|
py
|
Python
|
NetFlax_pipeline_code/scripts/TAGs.py
|
GCA-VH-lab/coreNet
|
19d328e808df93b7ea8ac18ed54982a6e720c09e
|
[
"MIT"
] | null | null | null |
NetFlax_pipeline_code/scripts/TAGs.py
|
GCA-VH-lab/coreNet
|
19d328e808df93b7ea8ac18ed54982a6e720c09e
|
[
"MIT"
] | null | null | null |
NetFlax_pipeline_code/scripts/TAGs.py
|
GCA-VH-lab/coreNet
|
19d328e808df93b7ea8ac18ed54982a6e720c09e
|
[
"MIT"
] | null | null | null |
__author__ = "Chayan Kumar Saha, Gemma C. Atkinson"
__copyright__ = "MIT License: Copyright (c) 2020 Chayan Kumar Saha"
__email__ = "chayan.sust7@gmail.com"
from Bio import SeqIO
from Bio import Entrez
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein, IUPAC
import math, re
import argparse
import ftplib
import socket
import random
import time
from random import randint
import colorsys
import os, sys, os.path, math
import gzip
import getopt
from collections import OrderedDict
import subprocess
from tkinter import *
master = Tk()
usage= ''' Description: TAGs in an extended version of FlaGs, which finds TA-like [type 2] structure by analysing conservation of genomic neighbourhood of protein accession input made of either toxins or antitoxins; and visualize the structure; Requirement= Python3, BioPython; tkinter ; Optional Requirement= ETE3. '''
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("-a", "--assemblyList", help=" Protein Accession with assembly Identifier eg. GCF_000001765.3 in a text file separated by newline. ")
parser.add_argument("-p", "--proteinList", help=" Protein Accession eg. XP_ or WP_047256880.1 in a text file separated by newline. ")
parser.add_argument("-l", "--localGenomeList", help=" Genome File name and Protein Accession ")
parser.add_argument("-ld", "--localGenomeDirectory", help=" Path for Local Files, Default directory is './' which is the same directory where the script is located or running from. ")
parser.add_argument("-r", "--redundant", help=" To search all assembly type -r A or -r a but for selected number of assembly eg.,5 for each query use -r 5. ")
parser.add_argument("-e", "--ethreshold", help=" E value threshold. Default = 1e-10 ")
parser.add_argument("-n", "--number", help=" Number of Jackhmmer iterations. Default = 3")
parser.add_argument("-g", "--gene", help=" Number of genes for looking up or downstream. Default = 4 ")
parser.add_argument("-i", "--intergenic", help=" Number of intergenic space. Default = 100 ")
parser.add_argument("-t", "--tree", action="store_true", help=" If you want to see flanking genes along with phylogenetic tree, requires ETE3 installation. By default it will not produce. ")
parser.add_argument("-ts", "--tshape", help=" Size of triangle shapes that represent flanking genes, this option only works when -t is used. Default = 12 ")
parser.add_argument("-tf", "--tfontsize", help=" Size of font inside triangles that represent flanking genes, this option only works when -t is used. Default = 4 ")
parser.add_argument("-tl", "--taxalist", help="List of Taxa name and GCF")
parser.add_argument("-to", "--tree_order", action="store_true", help=" Generate Output with Tree, and then use the tree order to generate other view. ")
parser.add_argument("-u", "--user_email", required=True, action="append", metavar="RECIPIENT",default=[], dest="recipients", help=" User Email Address (at least one required) ")
parser.add_argument("-api", "--api_key", help=" NCBI API Key, To get this key kindly check https://ncbiinsights.ncbi.nlm.nih.gov/2017/11/02/new-api-keys-for-the-e-utilities/ ")
parser.add_argument("-o", "--out_prefix", required= True, help=" Any Keyword to define your output eg. MyQuery ")
parser.add_argument("-c", "--cpu", help="Maximum number of parallel CPU workers to use for multithreads. ")
parser.add_argument("-k", "--keep", action="store_true", help=" If you want to keep the intermediate files eg. gff3 use [-k]. By default it will remove. ")
parser.add_argument("-v", "--version", action="version", version='%(prog)s 1.2.7.21')
parser.add_argument("-vb", "--verbose", action="store_true", help=" Use this option to see the work progress for each query as stdout. ")
args = parser.parse_args()
parser.parse_args()
print('\nStarting TAGs version 1.2.7.21 \nPlease only run one instance of TAGs at a time to avoid making more queries than NCBI’s limit.')
print('For more information, please check https://ncbiinsights.ncbi.nlm.nih.gov/2017/11/02/new-api-keys-for-the-e-utilities/ \n')
print('Checking for RefSeq and Genbank summary files and downloading if needed ... \n')
Entrez.tool = 'FlaGs'
ncbi_time=0.4
timeout = 10
socket.setdefaulttimeout(timeout)
def checkBioPython(): #Checking Biopython Version
import Bio
return (Bio.__version__)
from tkinter.font import Font #Font for postscript to-scale output
myFont12 = Font(family="Helvetica", size=12)
myFont7 = Font(family="Helvetica", size=7)
Entrez.email = args.recipients[0] #User email
Entrez.max_tries = 5
Entrez.sleep_between_tries = 60
if not args.localGenomeList:
if args.api_key:
Entrez.api_key = args.api_key #Valid API-key allows 10 queries per seconds, which makes the tool run faster
else:
if args.api_key:
print('Since FlaGs will use Local Data api_key is not necessary, Thanks!')
sys.exit()
#Color generator
def random_color(h=None):
"""Generates a random color in RGB format."""
if not h:
c = random.random()
d = 0.5
e = 0.5
return _hls2hex(c, d, e)
def _hls2hex(c, d, e):
return '#%02x%02x%02x' %tuple(map(lambda f: int(f*255),colorsys.hls_to_rgb(c, d, e)))
def outliner (item):
if item =='#ffffff':
return '#bebebe'
elif item =='#f2f2f2':
return '#008000'
elif item =='#f2f2f3':
return '#000080'
else:
return item
if args.redundant: #Search flanking genes in limited or all available GCFs for each query
if args.redundant.isdigit():
if int(args.redundant)==0:
print('Please use -r option correctly, kindly check help or manual and try again. Thanks!')
sys.exit()
else:
pass
if not args.redundant.isdigit():
if args.redundant.lower()=='a':
pass
else:
print('Please use -r option correctly, kindly check help or manual and try again. Thanks!')
sys.exit()
if args.cpu:
if int(args.cpu)>0:
core=int(args.cpu)
else:
print('Please use number eg, 1,2...')
sys.exit()
if args.assemblyList:
if args.redundant:
print('"-r" option is only works with "-p", please try again with proper command')
sys.exit()
if args.localGenomeList:
if args.redundant:
print('"-r" option is only works with "-p", please try again with proper command')
sys.exit()
if args.tree:
if args.tshape:
if int(args.tshape)>0:
size=int(args.tshape)
else:
print("Kindly input the size of triangles, recommended 12. Not applicable for 0 and negative values")
else:
size=12
else:
if args.tshape:
print("Kindly make sure that you are using -t to make this -ts argument working")
sys.exit()
if args.tree:
if args.tfontsize:
if int(args.tfontsize)>0:
fsize=str(args.tfontsize)
else:
print("Kindly input the font Size required inside triangles, recommended 4. Not applicable for 0 and negative values")
else:
fsize=str(4)
else:
if args.tfontsize:
print("Kindly make sure that you are using -t to make this -tf argument working")
sys.exit()
if args.tree_order:
if not args.tree:
print("Kindly make sure that you are using -t to make this -to argument working")
sys.exit()
if args.ethreshold:
evthresh=args.ethreshold
else:
evthresh="1e-10"
if args.number:
iters=args.number
else:
iters="3"
if args.gene:
if int(args.gene)>0:
s= str(int(args.gene)+1)
else:
print('Please insert positive values, starting from 1')
sys.exit()
else:
s= "5"
if args.localGenomeList:
if args.localGenomeDirectory:
if os.path.isdir(args.localGenomeDirectory):
if args.localGenomeDirectory[-1]=='/':
localDirIn=args.localGenomeDirectory
print('Local Data path : ', localDirIn, '\n')
else:
localDirIn=args.localGenomeDirectory+'/'
print('Local Data path : ', localDirIn, '\n')
else:
print('No directory Found as : '+ args.localGenomeDirectory)
sys.exit()
else:
localDirIn='./'
else:
if args.localGenomeDirectory:
print('Please use -l flag to make -ld flag working')
sys.exit()
if not args.localGenomeList:
localDir='./'
else:
localDir=localDirIn
#TAGs
if args.intergenic:
if int(args.intergenic)>0:
gappyness=int(args.intergenic)
else:
print('Please insert positive values, starting from 1')
sys.exit()
else:
gappyness=100
def checkChar(item): #removing characters
import re
items=item.replace('\t','').replace(' ','')
return re.sub("[a-zA-Z0-9_.]","",items)
queryList=[] #Formatting input as a list
#(queryList)
#eg 1. [['WP_019504790.1', 'GCF_000332195.1'], ['WP_028108719.1', 'GCF_000422645.1']]
#eg 2. [['WP_019504790.1'], ['WP_028108719.1']]
#TAGs
netIn_SpDict={}
if args.taxalist:
with open(args.taxalist,'r') as taxaName:
for line in taxaName:
Line=line.rstrip().split('\t')
netIn_SpDict[Line[0]]=Line[1]
def tspLocal(faa,acc): #getting species name using assembly number or accession
if netIn_SpDict: #TAGS
if faa in netIn_SpDict:
return netIn_SpDict[faa]
if not netIn_SpDict:
faaFile=faa+'.faa.gz'
fastaSeq = gzip.open(localDir+faaFile, "rt")
for record in SeqIO.parse(fastaSeq, "fasta"):
if record.id==acc:
return remBadChar(record.description.split('[')[-1][:-1])
if args.localGenomeList:
with open (args.localGenomeList, 'r') as gList, open (args.out_prefix+'_taxaList.txt', 'w') as taxOut:
for line in gList:
if checkChar(line.rstrip().replace(' ',''))=='':
Line=line.rstrip().replace(' ','').split('\t')
if len(Line)<2:
print('Check Input file, Incorrect Format.')
sys.exit()
else: #GCF_001639265.1 WP_066970962.1 Methanobrevibacter_filiformis_DSM_11501
newFormat=Line[1]+'\t'+Line[0]
queryList.append(newFormat.split('\t'))
print(Line[0], Line[1], tspLocal(Line[0],Line[1]), sep='\t', file=taxOut)
if Line[0] not in netIn_SpDict:
netIn_SpDict[Line[0]]=tspLocal(Line[0],Line[1])
#netIn_SpDict[Line[0]]=Line[2]
else:
print('The submitted query might include characters not found in NCBI protein accessions eg. > , # , ! etc. Please provide correct format, Thanks!')
sys.exit()
else:
if args.proteinList and not args.assemblyList:
with open (args.proteinList, 'r') as pList:
for line in pList:
if checkChar(line.rstrip().replace(' ',''))=='':
Line=line.rstrip().replace(' ','').split('\t')
if len(Line)>1:
print('Check Input file, Incorrect Format.')
sys.exit()
else:
queryList.append(Line)
else:
print('The submitted query might include characters not found in NCBI protein accessions eg. > , # , ! etc. Please provide correct format, Thanks!')
sys.exit()
elif args.assemblyList and not args.proteinList :
with open (args.assemblyList, 'r') as apList:
for line in apList:
if checkChar(line.rstrip().replace(' ',''))=='':
Line=line.rstrip().replace(' ','').split('\t')
if len(Line)<2:
print('Check Input file, Incorrect Format.')
sys.exit()
else:
newFormat=Line[1]+'\t'+Line[0]
queryList.append(newFormat.split('\t'))
else:
print('The submitted query might include characters not found in NCBI protein accessions eg. > , # , ! etc. Please provide correct format, Thanks!')
sys.exit()
else:
print('Incorrect Input!')
sys.exit()
def accession_from_xp(accession_nr):
"""
:param accession_nr: NCBI protein accession
:return: Bioproject number of all species for that protein which is used to grab Assembly number
"""
i=1
retry=True
while (retry) and i<6: #Retry 5 times with changed socket timeout
#Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
# blocked, warning is sent to this email.
try:
variableIsquare=i**2
changedtimeout = 10*variableIsquare #10-250 seconds (when i =1 , changedtimeout= 10*1 =10 | when i=2, changedtimeout= 10 * 2square =40 )
socket.setdefaulttimeout(changedtimeout)
time.sleep(ncbi_time)
handle = Entrez.efetch(db="protein", id=accession_nr, rettype="gbwithparts", retmode="text")
if handle:
retry=False
record = SeqIO.read(handle, "genbank")
bioproj=record.dbxrefs
handle.close()
bio=[]
for item in bioproj:
if item.split(':')[0]=='BioProject':
bio.append(item.split(':')[1])
if bio:
return set(bio)
else:
return {'NAI'}
else:
i+=1
retry=True
except Exception as e:
retry=True
i+=1
if not i<6:
print("\t\tQuery {}, not found in database. \n" "\t\tContinuing with the next protein in the list ... \n".format(accession_nr))
return False
def accession_from_wp(accession_nr):
    """
    :param accession_nr: NCBI protein accession
    :return: Set of assembly number of all species for particular protein

    Fetches the Identical Protein Groups (IPG) report for *accession_nr*
    and scrapes every assembly accession (GCF_/GCA_...) out of it.
    Returns a set of assembly ids, {'NAI'} when none are present, or
    False after 5 failed attempts.
    """
    i=1
    retry=True
    while (retry) and i<6: #Retry 5 times with changed socket timeout
        #Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
        # blocked, warning is sent to this email.
        try:
            variableIsquare=i**2
            changedtimeout = 10*variableIsquare #10-250 seconds (when i =1 , changedtimeout= 10*1 =10 | when i=2, changedtimeout= 10 * 2square =40 )
            socket.setdefaulttimeout(changedtimeout)
            time.sleep(ncbi_time)  # throttle requests to respect the NCBI rate limit
            handle = Entrez.efetch(db="protein", id=accession_nr, rettype="ipg", retmode="xml")
            if handle:
                retry=False
                # Biopython <=1.72 returns the IPG XML as an iterable of records;
                # newer versions expose it under the 'IPGReport' key.
                if float(checkBioPython())<=1.72:
                    record = list(Entrez.parse(handle))
                    handle.close()
                    assembly = re.findall("GC._\d*\.\d", str(record))
                    if assembly and len(assembly)>0:
                        return (set(assembly))
                    else:
                        return {'NAI'}
                else:
                    record = Entrez.read(handle)
                    handle.close()
                    assembly = re.findall("GC._\d*\.\d", str(record['IPGReport']))
                    if assembly and len(assembly)>0:
                        return (set(assembly))
                    else:
                        return {'NAI'}
            else:
                i+=1
                retry=True
        except Exception as e:
            retry=True
            i+=1
    if not i<6:
        print("\t\tQuery {}, not found in database. \n" "\t\tContinuing with the next protein in the list ... \n".format(accession_nr))
        return False
def seq_from_wp(accession_nr):
    """
    :param accession_nr: NCBI protein accession
    :return: Protein Sequence

    Returns '<description>\t<sequence>' for the accession, False on fetch
    failure. Accessions ending in '*' are internal pseudo-gene placeholders
    generated elsewhere in this script; they get a dummy '--' sequence
    without any network call.
    """
    if accession_nr[-1]!='*':
        #Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
        # blocked, warning is sent to this email.
        try:
            time.sleep(ncbi_time)  # throttle to stay under NCBI's request-rate limit
            handle = Entrez.efetch(db="protein", id=accession_nr, rettype="gbwithparts", retmode="text")
        except Exception as e:
            print(str(e), ", error in entrez-fetch protein accession, {}, not found in database. \n" "Continuing with the next protein in the list. \nError in function: {}".format(accession_nr, seq_from_wp.__name__))
            return False
        record = SeqIO.read(handle, "genbank")
        handle.close()
        # NOTE(review): str + Seq concatenation yields a Seq object, not a plain
        # str — confirm downstream consumers accept that.
        return record.description.split('[')[0]+'\t'+record.seq
    else:
        return accession_nr[:-1]+'\t'+'--'
def remBadChar(item): #removing characters from species name
    """Sanitize a species/assembly name: every character outside
    [a-zA-Z0-9] (spaces, dots, brackets, ...) is replaced by '_'."""
    import re
    # One pass: the original mapped non-alphanumerics to ' ' and then
    # ' ' to '_'; substituting '_' directly is equivalent.
    return re.sub("[^a-zA-Z0-9]", "_", item)
def identicalProtID(accnr): #searching for identical proteins
    """Look up NCBI's Identical Protein Groups report for *accnr* and pick a
    replacement accession that has an assembly attached.

    Preference order: a different RefSeq-style accession (3rd char '_'),
    then *accnr* itself if it appears with an assembly, then a different
    non-RefSeq accession. Falls back to returning *accnr* unchanged.
    """
    i=1
    retry=True
    while (retry) and i<6: #Retry 5 times with changed socket timeout
        #Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
        # blocked, warning is sent to this email.
        try:
            variableIsquare=i**2
            changedtimeout = 10*variableIsquare #10-250 seconds (when i =1 , changedtimeout= 10*1 =10 | when i=2, changedtimeout= 10 * 2square =40 )
            socket.setdefaulttimeout(changedtimeout)
            time.sleep(ncbi_time)
            epost_1 = Entrez.read(Entrez.epost(db="protein", id=accnr))
            webenv = epost_1["WebEnv"]        # NOTE(review): unused local; efetch below re-reads epost_1
            query_key = epost_1["QueryKey"]   # NOTE(review): unused local
            iden_prots = Entrez.efetch(db="protein", rettype='ipg', retmode='text', webenv=epost_1["WebEnv"], query_key=epost_1["QueryKey"])
            # Partition IPG rows that carry an assembly id into four buckets:
            # other RefSeq-like ids, self RefSeq-like, other plain, self plain.
            iAccSet=set()
            sAccSet=set()
            inrAccSet=set()
            snrAccSet=set()
            for item in iden_prots:
                if item[0:2]!='Id':  # skip the report header row
                    if re.search("GC._\d*\.\d", item):  # only rows with an assembly accession
                        itemLine=item.rstrip().split('\t')
                        iAccession=itemLine[6]
                        iAssembly=itemLine[-1]
                        if iAccession!=accnr and iAccession[2]=='_':
                            iAccSet.add(iAccession)
                        if iAccession!=accnr and iAccession[2]!='_':
                            inrAccSet.add(iAccession)
                        if iAccession==accnr and iAccession[2]=='_':
                            sAccSet.add(iAccession)
                        if iAccession==accnr and iAccession[2]!='_':
                            snrAccSet.add(iAccession)
            if len(iAccSet)>0 and len(sAccSet)==0:
                for ids in random.sample(iAccSet,1):
                    return ids
            elif len(sAccSet)!=0:
                for ids in random.sample(sAccSet,1):
                    return ids
            else:
                if len(inrAccSet)>0 and len(snrAccSet)==0 and len(iAccSet)==0:
                    for ids in random.sample(inrAccSet,1):
                        return ids
                else:
                    return accnr
            retry=False  # NOTE(review): unreachable — every branch above returns
        except:
            retry=True
            i+=1
    if not i<6:
        return accnr
def identicalProtID_WP(accnr): #searching for identical proteins
    """From the IPG report of *accnr*, pick one different WP_ accession that
    has an assembly attached; return *accnr* unchanged when none exists or
    after 5 failed attempts."""
    i=1
    retry=True
    while (retry) and i<6: #Retry 5 times with changed socket timeout
        #Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
        # blocked, warning is sent to this email.
        try:
            variableIsquare=i**2
            changedtimeout = 10*variableIsquare #10-250 seconds (when i =1 , changedtimeout= 10*1 =10 | when i=2, changedtimeout= 10 * 2square =40 )
            socket.setdefaulttimeout(changedtimeout)
            time.sleep(ncbi_time)
            epost_1 = Entrez.read(Entrez.epost(db="protein", id=accnr))
            webenv = epost_1["WebEnv"]        # NOTE(review): unused local
            query_key = epost_1["QueryKey"]   # NOTE(review): unused local
            iden_prots = Entrez.efetch(db="protein", rettype='ipg', retmode='text', webenv=epost_1["WebEnv"], query_key=epost_1["QueryKey"])
            iAccSet=set()
            for item in iden_prots:
                if item[0:2]!='Id':  # skip the report header row
                    if re.search("GC._\d*\.\d", item):  # only rows with an assembly accession
                        itemLine=item.rstrip().split('\t')
                        iAccession=itemLine[6]
                        iAssembly=itemLine[-1]
                        if iAccession!=accnr and iAccession[:3]=='WP_':
                            iAccSet.add(iAccession)
            if len(iAccSet)>0:
                for ids in random.sample(iAccSet,1):
                    return ids
            else:
                return accnr
            retry=False  # NOTE(review): unreachable — both branches above return
        except:
            retry=True
            i+=1
    if not i<6:
        return accnr
def identicalProtID_WP_Sp(accnr): #searching for identical proteins with same assembly for 'NP_417570.1' > WP_000785722.1|GCF_000005845.2
    """From the IPG report of *accnr*, return an '<accession>|<assembly>'
    pair usable in place of *accnr* (e.g. NP_417570.1 ->
    WP_000785722.1|GCF_000005845.2), '<accnr>|<assembly>' when *accnr*
    itself has one, or '#' when nothing suitable is found / all retries
    fail."""
    i=1
    retry=True
    while (retry) and i<6: #Retry 5 times with changed socket timeout
        #Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
        # blocked, warning is sent to this email.
        try:
            variableIsquare=i**2
            changedtimeout = 10*variableIsquare #10-250 seconds (when i =1 , changedtimeout= 10*1 =10 | when i=2, changedtimeout= 10 * 2square =40 )
            socket.setdefaulttimeout(changedtimeout)
            time.sleep(ncbi_time)
            epost_1 = Entrez.read(Entrez.epost(db="protein", id=accnr))
            webenv = epost_1["WebEnv"]        # NOTE(review): unused local
            query_key = epost_1["QueryKey"]   # NOTE(review): unused local
            iden_prots = Entrez.efetch(db="protein", rettype='ipg', retmode='text', webenv=epost_1["WebEnv"], query_key=epost_1["QueryKey"])
            iAccSetSpecial=set()     # 'WP_x|assembly' sharing the query's own assembly
            iAccNRSetSpecial=set()   # other versioned accessions with a GC* assembly
            iAssemblyList=[]         # assemblies where *accnr* appears (RefSeq-style id)
            iAssemblyListNR=[]       # assemblies where *accnr* appears (plain versioned id)
            for item in iden_prots:
                if item[0:2]!='Id':  # skip the report header row
                    if re.search("GC._\d*\.\d", item):
                        itemLine=item.rstrip().split('\t')
                        iAccession=itemLine[6]
                        iAssembly=itemLine[-1]
                        if iAccession==accnr and iAccession[2]=='_':
                            iAssemblyList.append(iAssembly)
                        if iAccession==accnr and iAccession[-2]=='.' and iAssembly[0]=='G':
                            iAssemblyListNR.append(iAssembly)
                        if iAccession!=accnr and iAccession[-2]=='.' and iAssembly[0]=='G':
                            assNR=iAccession+'|'+iAssembly
                            iAccNRSetSpecial.add(assNR)
                        if iAccession!=accnr and iAccession[:3]=='WP_':
                            # only WP_ ids from the same assembly as the first self hit
                            if iAssemblyList:
                                if iAssembly==iAssemblyList[0]:
                                    assWp=iAccession+'|'+iAssembly
                                    iAccSetSpecial.add(assWp)
            if iAccSetSpecial:
                for ids in random.sample(iAccSetSpecial,1):
                    return ids
            else:
                if iAssemblyList:
                    for ids in random.sample(iAssemblyList,1):
                        return accnr+'|'+ids
                else:
                    if iAssemblyListNR:
                        for nrids in random.sample(iAssemblyListNR,1):
                            return accnr+'|'+nrids
                    else:
                        if iAccNRSetSpecial:
                            for ids in random.sample(iAccNRSetSpecial,1):
                                return ids
                        else:
                            return '#'
            retry=False  # NOTE(review): unreachable — every branch above returns
        except:
            retry=True
            i+=1
    if not i<6:
        return '#'
def identicalProtID_redundant(accnr): #searching for identical proteins with same assembly for 'NP_417570.1' > WP_000785722.1|GCF_000005845.2
    """Collect every assembly in *accnr*'s IPG report on which *accnr*
    itself occurs. Returns a set of assembly ids, or '#' when none exist
    or after 5 failed attempts (used by the --redundant mode)."""
    i=1
    retry=True
    while (retry) and i<6: #Retry 5 times with changed socket timeout
        #Entrez.email = "_@gmail.com" # If you do >3 entrez searches on NCBI per second, your ip will be
        # blocked, warning is sent to this email.
        try:
            variableIsquare=i**2
            changedtimeout = 10*variableIsquare #10-250 seconds (when i =1 , changedtimeout= 10*1 =10 | when i=2, changedtimeout= 10 * 2square =40 )
            socket.setdefaulttimeout(changedtimeout)
            time.sleep(ncbi_time)
            epost_1 = Entrez.read(Entrez.epost(db="protein", id=accnr))
            webenv = epost_1["WebEnv"]        # NOTE(review): unused local
            query_key = epost_1["QueryKey"]   # NOTE(review): unused local
            iden_prots = Entrez.efetch(db="protein", rettype='ipg', retmode='text', webenv=epost_1["WebEnv"], query_key=epost_1["QueryKey"])
            iAssemblyset=set()
            for item in iden_prots:
                if item[0:2]!='Id':  # skip the report header row
                    if re.search("GC._\d*\.\d", item):
                        itemLine=item.rstrip().split('\t')
                        iAccession=itemLine[6]
                        iAssembly=itemLine[-1]
                        if iAccession==accnr:
                            iAssemblyset.add(iAssembly)
            if iAssemblyset and len(iAssemblyset)>0:
                return iAssemblyset
            else:
                return '#'
            retry=False  # NOTE(review): unreachable — both branches above return
        except:
            retry=True
            i+=1
    if not i<6:
        return '#'
def sortGCFvsGCA(gcagcfSet):
    """Prefer RefSeq over GenBank assemblies in a set of assembly ids.

    :param gcagcfSet: set of assembly accessions ('GCF_...'/'GCA_...'),
        or the string 'NAI'.
    :return: the GCF_ subset when present, otherwise the GCA_ subset,
        otherwise the input unchanged ('NAI' passes straight through).

    Bug fix: the original ended with a bare ``gcagcfSet`` expression in its
    final else branch (missing ``return``), so the 'NAI' input silently
    yielded None.
    """
    if gcagcfSet != 'NAI':
        Aset = set()  # GenBank assemblies (GCA_*)
        Fset = set()  # RefSeq assemblies (GCF_*)
        for items in gcagcfSet:
            if items[2] == 'A':
                Aset.add(items)
            if items[2] == 'F':
                Fset.add(items)
        if len(Fset) > 0:
            return Fset
        elif len(Fset) == 0 and len(Aset) > 0:
            return Aset
        else:
            return gcagcfSet
    else:
        return gcagcfSet  # was: bare expression — returned None
def des_check(item):
    """Pass *item* through when truthy; map empty/None to 'notFound'."""
    return item if item else 'notFound'
def normalize_strand(item1, item2): #Strand direction change
    """Express strand *item2* relative to reference strand *item1*:
    unchanged on a '+' reference, flipped otherwise."""
    if item1 == '+':
        return item2
    return '-' if item2 == '+' else '+'
def up(item):
    """Side label for the region before the gene: 'Upstream ' on '+',
    'Downstream ' on any other strand value (note trailing space)."""
    return 'Upstream ' if item == '+' else 'Downstream '
def down(item):
    """Side label for the region after the gene: 'Downstream ' on '+',
    'Upstream ' on any other strand value (note trailing space)."""
    return 'Downstream ' if item == '+' else 'Upstream '
def ups(item):
    """Sign used for upstream offsets: negative on a '+' strand,
    positive otherwise."""
    return '-' if item == '+' else '+'
def downs(item):
    """Sign used for downstream offsets: positive on a '+' strand,
    negative otherwise."""
    return '+' if item == '+' else '-'
def lcheck(item):
    """Return 1 when the collection contains the flag value 1, else 0."""
    return 1 if 1 in item else 0
def postscriptSize(item):
    """Scale a length for PostScript drawing: values below 1000 collapse
    to 0, anything else is divided by 1000 (float division)."""
    size = int(item)
    return 0 if size < 1000 else size / 1000
def getSpeciesFromGCF(faa, item):
    """Build a sanitized species label from an organism name (*item*) and
    an assembly id (*faa*); in --redundant mode the assembly id is
    appended to keep labels unique. Empty names map to 'Nothing'."""
    if item == '':
        return 'Nothing'
    if args.redundant:
        return remBadChar(item) + '_' + remBadChar(faa)
    return remBadChar(item)
def spLocal(faa,acc): #getting species name using assembly number or accession
    """Resolve a species label for assembly *faa* / protein accession *acc*.

    Lookup order: the user-supplied name map (netIn_SpDict), then the
    RefSeq-derived cache (speciesNameFromOnlineDict), finally the
    '[organism]' tag parsed from the local <faa>.faa.gz FASTA headers.
    Returns None implicitly when nothing matches.
    """
    if netIn_SpDict: #TAGS
        if faa in netIn_SpDict:
            return netIn_SpDict[faa]
    if not netIn_SpDict:
        if faa in speciesNameFromOnlineDict:
            return speciesNameFromOnlineDict[faa]
        else:
            faaFile=faa+'.faa.gz'
            fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
            for record in SeqIO.parse(fastaSeq, "fasta"):
                if record.id==acc:
                    # organism name is the last '[...]' chunk of the description
                    if args.redundant:
                        return remBadChar(record.description.split('[')[-1][:-1])+'_'+remBadChar(faa)
                    else:
                        return remBadChar(record.description.split('[')[-1][:-1])
def desLocal(faa, acc):
    """Return the description text (before the '[organism]' tag) of protein
    *acc* from the local gzipped FASTA <faa>.faa.gz, or None when the
    accession is not present.

    Fix: the original left the gzip handle open; a ``with`` block now
    guarantees it is closed even on the early return.
    """
    faaFile = faa + '.faa.gz'
    with gzip.open(localDir + faaFile, "rt") as fastaSeq:
        for record in SeqIO.parse(fastaSeq, "fasta"):
            if record.id == acc:
                return record.description.split('[')[0]
def seqLocal(faa, acc):
    """Return the amino-acid sequence (str) of protein *acc* from the local
    gzipped FASTA <faa>.faa.gz, or None when the accession is not present.

    Fix: the original left the gzip handle open; a ``with`` block now
    guarantees it is closed even on the early return.
    """
    faaFile = faa + '.faa.gz'
    with gzip.open(localDir + faaFile, "rt") as fastaSeq:
        for record in SeqIO.parse(fastaSeq, "fasta"):
            if record.id == acc:
                return str(record.seq)
def localNone(item):
    """Replace a missing (None) lookup result with the placeholder '--';
    pass every other value through unchanged."""
    return '--' if item is None else item
def seqFasLocal(faa,acc): #making fasta file from accession
    """Return a FASTA-formatted string for protein *acc* from the local
    <faa>.faa.gz, with the header rewritten to '<acc>|<species-label>'.

    *acc* may carry a '#<n>' query-index suffix; only the part before '#'
    is matched against record ids. When spLocal() cannot resolve a species
    name, the label is parsed from the record's own '[organism]' tag.
    Returns None implicitly when the accession is absent.
    """
    if spLocal(faa,acc):
        faaFile=faa+'.faa.gz'
        fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
        for record in SeqIO.parse(fastaSeq, "fasta"):
            if record.id==acc.split('#')[0]:
                record.id=acc+'|'+spLocal(faa,acc)#.replace(':','_').replace('[','_').replace(']','_')
                record.description=''
                return record.format("fasta")
    else:
        faaFile=faa+'.faa.gz'
        fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
        for record in SeqIO.parse(fastaSeq, "fasta"):
            if record.id==acc.split('#')[0]:
                # fall back to the organism tag embedded in the FASTA header
                if args.redundant:
                    record.id=acc+'|'+remBadChar(record.description.split('[')[-1][:-1])+'_'+remBadChar(faa)#.replace(':','_').replace('[','_').replace(']','_')
                else:
                    record.id=acc+'|'+remBadChar(record.description.split('[')[-1][:-1])
                record.description=''
                return record.format("fasta")
def seqFasLenLocal(faa,acc): #making fasta file from accession
    """Return the sequence length (int) of protein *acc* from the local
    <faa>.faa.gz, mirroring seqFasLocal()'s lookup logic.

    Returns None implicitly when the accession is absent or the record
    has an empty sequence. The record.id/description rewrites below have
    no external effect — only the length is returned.
    """
    if spLocal(faa,acc):
        faaFile=faa+'.faa.gz'
        fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
        for record in SeqIO.parse(fastaSeq, "fasta"):
            if record.id==acc.split('#')[0]:
                record.id=acc+'|'+spLocal(faa,acc)#.replace(':','_').replace('[','_').replace(']','_')
                record.description=''
                if record.seq:
                    return len(record.seq)
    else:
        faaFile=faa+'.faa.gz'
        fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
        for record in SeqIO.parse(fastaSeq, "fasta"):
            if record.id==acc.split('#')[0]:
                if args.redundant:
                    record.id=acc+'|'+remBadChar(record.description.split('[')[-1][:-1])+'_'+remBadChar(faa)#.replace(':','_').replace('[','_').replace(']','_')
                else:
                    record.id=acc+'|'+remBadChar(record.description.split('[')[-1][:-1])
                record.description=''
                if record.seq:
                    return len(record.seq)
def redundantCreate(setDict, nums):
    """Randomly pick assemblies for --redundant mode.

    :param setDict: collection (typically a set) of assembly ids.
    :param nums: 'A'/'a' to keep all entries (shuffled), otherwise a
        numeric string — at most int(nums) entries are drawn.
    :return: list of sampled entries.

    Fix: ``random.sample`` on a set is deprecated since Python 3.9 and
    raises TypeError on 3.11+; the population is materialized to a list
    first.
    """
    pool = list(setDict)  # random.sample requires a sequence on Python 3.11+
    if nums == 'A' or nums == 'a':
        return random.sample(pool, len(pool))
    if len(pool) > int(nums):
        return random.sample(pool, int(nums))
    return random.sample(pool, len(pool))
#Download assembly summary report from NCBI Refseq and genBank
if not args.localGenomeList:
    refDb='./refSeq.db'
    genDb='./genBank.db'
    # Size of any previously downloaded summary; '0' forces a fresh download.
    if os.path.isfile(refDb):
        refDbSize=os.path.getsize(refDb)
    else:
        refDbSize='0'
    if os.path.isfile(genDb):
        genDbSize=os.path.getsize(genDb)
    else:
        genDbSize='0'
    ftp = ftplib.FTP('ftp.ncbi.nih.gov', 'anonymous', 'anonymous@ftp.ncbi.nih.gov')
    ftp.cwd("/genomes/refseq") # move to refseq directory
    filenames = ftp.nlst() # get file/directory names within the directory
    if 'assembly_summary_refseq.txt' in filenames:
        ftp.sendcmd("TYPE i") # binary mode so ftp.size() is allowed
        if int(ftp.size('assembly_summary_refseq.txt'))!=int(refDbSize):#check if the previously downloaded db exists and if that's updated to recent one
            ftp.retrbinary('RETR ' + 'assembly_summary_refseq.txt', open('refSeq.db', 'wb').write) # get the assembly summary from refseq
        else:
            pass
    ftp_gen = ftplib.FTP('ftp.ncbi.nih.gov', 'anonymous', 'anonymous@ftp.ncbi.nih.gov')
    ftp_gen.cwd("/genomes/genbank") # move to genbank directory
    filenames = ftp_gen.nlst() # get file/directory names within the directory
    if 'assembly_summary_genbank.txt' in filenames:
        ftp_gen.sendcmd("TYPE i")
        if int(ftp_gen.size('assembly_summary_genbank.txt'))!=int(genDbSize):#check if the previously downloaded db exists and if that's updated to recent one
            ftp_gen.retrbinary('RETR ' + 'assembly_summary_genbank.txt', open('genBank.db', 'wb').write) # get the assembly summary from genbank
        else:
            pass
    assemblyName={}
    bioDict={} #bioproject as keys and assemble number (eg.GCF_000001765.1) as value
    accnr_list_dict={} #create a dictionary accessionNumber is a key and Organism name and ftp Gff3 download Link as value
    # RefSeq summary columns used: 0=assembly, 1=bioproject, 7=organism, 19=ftp path
    with open('refSeq.db', 'r') as fileIn:
        for line in fileIn:
            if line[0]!='#':
                Line=line.rstrip().split('\t')
                accnr_list_dict[Line[0]]= Line[7]+'\t'+Line[19]
                bioDict[Line[1]]=Line[0]
                assemblyName[Line[0]]=Line[0]
    ftp_gen = ftplib.FTP('ftp.ncbi.nih.gov', 'anonymous', 'anonymous@ftp.ncbi.nih.gov')
    ftp_gen.cwd("/genomes/genbank") # move to genbank directory
    assemblyName_GCA={}
    bioDict_gen={}
    accnr_list_dict_gen={} #create a dictionary accessionNumber is a key and Organism name and ftp Gff3 download Link as value
    # GenBank rows marked 'identical' (col 18) to a known RefSeq assembly
    # (col 17) reuse the RefSeq entry; everything else keeps its own link.
    with open('genBank.db', 'r') as fileIn:
        for line in fileIn:
            if line[0]!='#':
                Line=line.rstrip().split('\t')
                if len(Line)>19:
                    if Line[18]=='identical':
                        if Line[17] in accnr_list_dict:
                            bioDict_gen[Line[1]]=Line[0]
                            accnr_list_dict_gen[Line[0]]= accnr_list_dict[Line[17]]
                            assemblyName_GCA[Line[0]]=Line[17]
                        else:
                            accnr_list_dict_gen[Line[0]]=Line[7]+'\t'+Line[19]
                    else:
                        accnr_list_dict_gen[Line[0]]=Line[7]+'\t'+Line[19]
    bioDict.update(bioDict_gen)
    accnr_list_dict.update(accnr_list_dict_gen)
    assemblyName.update(assemblyName_GCA)
    print ('\n'+ '>> Database Downloaded. Cross-checking of the accession list in progress ...'+ '\n')
q=0   # running query index
ne=0  # count of queries discarded for naming/accession errors
queryDict={} #protein Id as query and a set of assembly number as value [either All or Species of interest]
#{'WP_019504790.1#1': {'GCF_000332195.1'}, 'WP_028108719.1#2': {'GCF_000422645.1'}, 'WP_087820443.1#3': {'GCF_900185565.1'}}
#{'WP_019504790.1#1': 'GCF_000332195.1', 'WP_028108719.1#2': 'GCF_000422645.1', 'WP_087820443.1#3': 'GCF_900185565.1'} local
# Cross-check every query accession against the RefSeq/GenBank summaries
# and fill queryDict (query-id#index -> set of candidate assemblies).
# Queries that cannot be resolved are logged to <out>_NameError.txt.
if not args.localGenomeList:
    with open (args.out_prefix+'_NameError.txt', 'w') as fbad:
        for query in queryList:
            q+=1
            accession_from_wp_out=''
            accession_from_xp_out=''
            identicalProtID_Out=''
            accession_from_wp_ID_out=''
            accession_from_xp_ID_out=''
            accession_from_wp_IDSame_out=''
            exceptionalWP_out=''
            accession_from_wp_exceptional=''
            special_out = ''
            assembly_from_identical=''
            if args.verbose:
                print('\t Checking Query '+ query[0] +' ....'+ '('+str(q)+'/'+str(len(queryList))+')')
            if len(query)<2:  # accession only — search across all assemblies
                if query[0][:2]=='WP' and query[0][-2]=='.': #WP Accession full WP_000785722.1
                    accession_from_wp_out=accession_from_wp(query[0])
                    if accession_from_wp_out:
                        queryDict[query[0]+'#'+str(q)]=sortGCFvsGCA(accession_from_wp_out)
                    else:
                        ne+=1
                        print(query[0], file= fbad)
                elif query[0][:2]=='XP' and query[0][-2]=='.': #XP Accession full XP_003256407.1
                    accession_from_xp_out=accession_from_xp(query[0])
                    if accession_from_xp_out:
                        # map BioProjects to assemblies via the summary DB
                        assemList=[]
                        for bioprojs in accession_from_xp_out:
                            if bioprojs in bioDict:
                                assemList.append(bioDict[bioprojs])
                        if assemList:
                            queryDict[query[0]+'#'+str(q)]=sortGCFvsGCA(set(assemList))
                    else:
                        ne+=1
                        print(query[0], file= fbad)
                else: #other accessions can be XP_003256407 , WP_000785722 too
                    identicalProtID_Out=identicalProtID(query[0])
                    if identicalProtID_Out!=query[0]: #can be anything XP_ or WP_ or YP_ or NP_
                        if identicalProtID_Out[:-3]!='XP_': #Not XPs
                            accession_from_wp_ID_out=accession_from_wp(identicalProtID_Out)
                            if accession_from_wp_ID_out:
                                asset=set()
                                for elements in accession_from_wp_ID_out:
                                    asset.add(elements)
                                if len(asset)>0:
                                    queryDict[identicalProtID_Out+'#'+str(q)+'.'+query[0]]=sortGCFvsGCA(asset)
                            else:
                                ne+=1
                                print(query[0], file= fbad)
                        if identicalProtID_Out[:-3]=='XP_': #if XPs
                            accession_from_xp_ID_out=accession_from_xp(identicalProtID_Out)
                            if accession_from_xp_ID_out:
                                assemList=[]
                                for bioprojs in accession_from_xp_ID_out:
                                    if bioprojs in bioDict:
                                        assemList.append(bioDict[bioprojs])
                                if assemList:
                                    queryDict[identicalProtID_Out+'#'+str(q)+'.'+query[0]]=sortGCFvsGCA(set(assemList))
                            else:
                                ne+=1
                                print(query[0], file= fbad)
                    elif identicalProtID_Out==query[0]: #excluding XP Wp pre #YP NP
                        exceptionalWP_out = identicalProtID_WP(identicalProtID_Out)
                        special_out = identicalProtID_WP_Sp(identicalProtID_Out) #list Query GCF
                        if not args.redundant:
                            if special_out!='#':
                                # 'acc|assembly' pair — keep just the assembly
                                asset=set()
                                asset.add(special_out.split('|')[1])
                                queryDict[query[0]+'#'+str(q)]=asset
                            else:
                                if exceptionalWP_out[:3]=='WP_':
                                    accession_from_wp_exceptional=accession_from_wp(exceptionalWP_out)
                                    if accession_from_wp_exceptional:
                                        asset=set()
                                        for elements in accession_from_wp_exceptional:
                                            asset.add(elements)
                                        if len(asset)>0:
                                            if query[0]!=exceptionalWP_out:
                                                queryDict[exceptionalWP_out+'#'+str(q)+'.'+query[0]]=sortGCFvsGCA(asset)
                                            else:
                                                queryDict[exceptionalWP_out+'#'+str(q)]=sortGCFvsGCA(asset)
                                    else:
                                        ne+=1
                                        print(query[0], file= fbad)
                        if args.redundant:
                            if exceptionalWP_out[1:3]=='P_':
                                accession_from_wp_exceptional=accession_from_wp(exceptionalWP_out)
                                if accession_from_wp_exceptional:
                                    asset=set()
                                    for elements in accession_from_wp_exceptional:
                                        asset.add(elements)
                                    if len(asset)>0:
                                        if query[0]!=exceptionalWP_out:
                                            queryDict[exceptionalWP_out+'#'+str(q)+'.'+query[0]]=sortGCFvsGCA(asset)
                                        else:
                                            queryDict[exceptionalWP_out+'#'+str(q)]=sortGCFvsGCA(asset)
                                else:
                                    ne+=1
                                    print(query[0], file= fbad)
                            elif exceptionalWP_out[2]!='_':
                                assembly_from_identical=identicalProtID_redundant(identicalProtID_Out)
                                if assembly_from_identical!='#':
                                    asset=set()
                                    for elements in assembly_from_identical:
                                        asset.add(elements)
                                    if len(asset)>0:
                                        #queryDict[identicalProtID_Out+'#'+str(q)+'.'+query[0]]=sortGCFvsGCA(asset)
                                        if query[0]!=exceptionalWP_out:
                                            queryDict[exceptionalWP_out+'#'+str(q)+'.'+query[0]]=sortGCFvsGCA(asset)
                                        else:
                                            queryDict[exceptionalWP_out+'#'+str(q)]=sortGCFvsGCA(asset)
                                else:
                                    ne+=1
                                    print(query[0], file= fbad)
            else:  # accession + assembly pair — verify the pairing is real
                if query[0][:3]=='XP_' and query[0][-2]=='.': #XP Accession
                    asset=set()
                    accession_from_xp_out=accession_from_xp(query[0])
                    if accession_from_xp_out:
                        for bioprojs in accession_from_xp_out:
                            if bioprojs in bioDict:
                                if bioDict[bioprojs]==query[1]:
                                    asset.add(query[1])
                        if len(asset)>0:
                            queryDict[query[0]+'#'+str(q)]=asset
                        else:
                            ne+=1
                            print(query[0], file= fbad)
                elif query[0][:3]!='XP_' and query[0][-2]=='.': #not XP Accession
                    asset=set()
                    accession_from_wp_out=accession_from_wp(query[0])
                    if accession_from_wp_out:
                        for elements in accession_from_wp_out:
                            if query[1]==elements:
                                asset.add(query[1])
                        if len(asset)>0:
                            queryDict[query[0]+'#'+str(q)]=asset
                        else:
                            ne+=1
                            print(query[0], file= fbad)
else:
    # local-genome mode: the second column of the input is trusted as-is
    for query in queryList:
        q+=1
        queryDict[query[0]+'#'+str(q)]=query[1]
nai=0  # count of queries lacking usable info in the databases
NqueryDict={} #{'WP_019504790.1#1': ['GCF_000332195.1'], 'WP_028108719.1#2': ['GCF_000422645.1'], 'WP_087820443.1#3': ['GCF_900185565.1']}
# Reduce queryDict to NqueryDict (query -> list of assemblies actually used);
# failures are logged to <out>_Insufficient_Info_In_DB.txt.
if args.localGenomeList:
    with open (args.out_prefix+'_Insufficient_Info_In_DB.txt', 'w') as fNai:
        for query in queryDict:
            assemblyIdlist=[]
            if queryDict[query]!={'NAI'}:
                assemblyId=queryDict[query]
                faa_gz=localDir+queryDict[query]+'.faa.gz'
                if os.path.isfile(faa_gz):
                    with gzip.open(localDir+assemblyId+'.faa.gz', 'rb') as faaIn:
                        for line in faaIn:
                            if line.decode('utf-8')[0]=='>':
                                Line=line.decode('utf-8').rstrip()
                                # query accession must appear in the local FASTA headers
                                if '>'+query.split('#')[0]==Line.split(' ')[0]:
                                    gff_gz=localDir+assemblyId+'.gff.gz'
                                    if os.path.isfile(gff_gz):
                                        with gzip.open(localDir+assemblyId+'.gff.gz', 'rb') as lgffIn: #Download and read gff.gz
                                            # flag sets: cset = saw CDS rows, nset = saw 'Name=' fields,
                                            # pset = query accession matched a CDS Name
                                            cds_c=0
                                            name_c=0
                                            prot_c=0
                                            cset=set()
                                            nset=set()
                                            pset=set()
                                            for line in lgffIn:
                                                if line.decode('utf-8')[0]!='#':
                                                    Line=line.decode('utf-8').rstrip().split('\t')
                                                    if Line[2]=='CDS':
                                                        cds_c=1
                                                        cset.add(cds_c)
                                                        if Line[8].split(';')[3][:5]=='Name=': #eliminates pseudo gene as they don't have 'Name='
                                                            name_c=1
                                                            nset.add(name_c)
                                                            if Line[8].split(';')[3].split('=')[1]==query.split('#')[0]:
                                                                assemblyIdlist.append(assemblyId)
                                                                NqueryDict[query]=list(set(assemblyIdlist))
                                                                prot_c=1
                                                                pset.add(prot_c)
                                                            else:
                                                                prot_c=0
                                                                pset.add(prot_c)
                                                        else:
                                                            name_c=0
                                                            nset.add(name_c)
                                                    else:
                                                        cds_c=0
                                                        cset.add(cds_c)
                                            if lcheck(pset)>0:
                                                pass
                                            elif lcheck(pset)==0:
                                                if lcheck(cset)>0 and lcheck(nset)>0:
                                                    print(query.split('#')[0],' did not match with supplement local GFF File')
                                                    print(query, file=fNai)
                                                    nai+=1
                                                else:
                                                    print('Use recommended [NCBI refseq] format of GFF file')
                                                    break
                                    else:
                                        print("Error: %s file not found" % gff_gz)
                else:
                    print("Error: %s file not found" % faa_gz)
            else:
                print(query, file=fNai)
                nai+=1
else:
    with open (args.out_prefix+'_Insufficient_Info_In_DB.txt', 'w') as fNai:
        for query in queryDict:
            #print('\t', query, queryDict[query], 'All')
            if queryDict[query]:
                if len(queryDict[query])!=0:
                    if queryDict[query]!={'NAI'}:
                        #print('\t', query, queryDict[query], 'filtered')
                        if args.redundant:
                            # fan one query out into several '<query>.<n>' entries
                            redun=0
                            for newRed in (redundantCreate(queryDict[query],args.redundant)):
                                redun+=1
                                NqueryDict[query+'.'+str(redun)]=list(str(newRed).split())
                        else:
                            # NOTE(review): random.sample on a set fails on Python 3.11+ — confirm runtime version
                            NqueryDict[query]=random.sample(queryDict[query],1)
                    else:
                        print(query, file=fNai)
                        nai+=1
                else:
                    print(query, file=fNai)
                    nai+=1
            else:
                print(query, file=fNai)
                nai+=1
#print(NqueryDict)
# For every retained query, download its assembly's .gff.gz and .faa.gz from
# the NCBI FTP server (unless --localGenomeList), then resolve species names
# and print the input-assessment report.
if not args.localGenomeList:
    print('\n> Downloading Genome Assembly Files from NCBI FTP Server \n')
speciesNameFromOnlineDict={}  # assembly id -> sanitized species label
newQ=0  # number of queries that survived filtering
for query in NqueryDict:
    newQ+=1
    total=0  # 2 == both annotation and protein files fetched for this query
    for item in NqueryDict[query]:
        if not args.localGenomeList:
            AssemDown=0
            AssemFailed=0
            if item in accnr_list_dict:
                speciesNameFromOnlineDict[item]=getSpeciesFromGCF(item, accnr_list_dict[item].split('\t')[0])
                retry = True
                while (retry):
                    try:
                        ftpLine=accnr_list_dict[item].split('\t')[1]
                        ftpsplitDir = ftpLine.split('/')[3:]
                        ftp_path = '/'.join(map(str,ftpsplitDir))
                        #time.sleep(1)
                        ftp = ftplib.FTP('ftp.ncbi.nih.gov', 'anonymous', 'anonymous@ftp.ncbi.nih.gov')
                        ftp.cwd('/'+ftp_path)
                        files = ftp.nlst()
                        FileToDownload=[]
                        for ftpelements in files:
                            if '_genomic.gff.gz' in ftpelements:
                                FileToDownload.append(ftpelements)
                            if '_protein.faa.gz' in ftpelements:
                                FileToDownload.append(ftpelements)
                        if len(FileToDownload)==2:
                            for elements in FileToDownload:
                                #print(elements) #GCF_000964005.1_WiARP1.0_genomic.gff.gz GCF_000964005.1_WiARP1.0_protein.faa.gz
                                if '_genomic.gff.gz' in elements: #Check if GFF.gz is there
                                    if args.verbose:
                                        ftp.set_debuglevel(1)
                                    ftp.set_pasv(True)
                                    ftp.voidcmd('TYPE I')
                                    ftp.sendcmd("TYPE i")
                                    try:
                                        gffFileSize=ftp.size(elements)
                                        gffFileName=item+'.gff.gz'
                                        gffFileDownloaded=open(item+'.gff.gz', 'wb')
                                        ftp.retrbinary('RETR ' + elements, gffFileDownloaded.write)
                                        # keep-alive options to survive long transfers
                                        ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                                        ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
                                        ftp.sock.setsockopt(socket.IPPROTO_TCP,socket.TCP_KEEPCNT, 8)
                                        ftp.voidcmd("NOOP")
                                        gffFileDownloaded.close()
                                        # size check guards against truncated downloads
                                        if os.path.isfile(localDir+gffFileName):
                                            if os.path.getsize(gffFileName)==gffFileSize:
                                                AssemDown+=1
                                                retry = False
                                    except:
                                        AssemFailed+=-1
                                        retry = True
                                if '_protein.faa.gz' in elements: #Check if faa.gz is there
                                    if args.verbose:
                                        ftp.set_debuglevel(1)
                                    ftp.set_pasv(True)
                                    ftp.voidcmd('TYPE I')
                                    ftp.sendcmd("TYPE i")
                                    ftp.sendcmd("TYPE i")
                                    try:
                                        faaFileSize=ftp.size(elements)
                                        faaFileName=item+'.faa.gz'
                                        faaFileDownloaded=open(item+'.faa.gz', 'wb')
                                        ftp.retrbinary('RETR ' + elements, faaFileDownloaded.write)
                                        ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                                        ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15)
                                        ftp.sock.setsockopt(socket.IPPROTO_TCP,socket.TCP_KEEPCNT, 8)
                                        ftp.voidcmd("NOOP")
                                        faaFileDownloaded.close()
                                        if os.path.isfile(localDir+faaFileName):
                                            if os.path.getsize(faaFileName)==faaFileSize:
                                                AssemDown+=1
                                                retry = False
                                    except:
                                        AssemFailed+=-1
                                        retry = True
                            ftp.close()
                        else:
                            AssemFailed+=-2
                            retry = False
                    except:
                        retry = True
            else:
                AssemFailed+=-2
            total=AssemDown+AssemFailed
    if total==2:
        if args.verbose:
            print('\n\t'+'> NCBI Genome Assembly has been downloaded for '+query.split('#')[0]+' ('+str(newQ)+'/'+str(len(NqueryDict))+')'+'\n')
# Fill in species labels that the summary DB could not provide by parsing
# the downloaded (or local) FASTA headers.
for query in NqueryDict:
    for item in NqueryDict[query]:
        if args.localGenomeList:
            faaFile=item+'.faa.gz'
            fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
            for record in SeqIO.parse(fastaSeq, "fasta"):
                if record.id==query.split('#')[0]:
                    if args.redundant:
                        speciesNameFromOnlineDict[item]=remBadChar(record.description.split('[')[-1][:-1])+'_'+remBadChar(item)
                    else:
                        speciesNameFromOnlineDict[item]=remBadChar(record.description.split('[')[-1][:-1])
        else:
            if item not in speciesNameFromOnlineDict or speciesNameFromOnlineDict[item]=='Nothing':
                faaFile=item+'.faa.gz'
                if os.path.isfile(localDir+faaFile):
                    fastaSeq = gzip.open(localDir+faaFile, "rt")  # NOTE(review): handle is never closed
                    for record in SeqIO.parse(fastaSeq, "fasta"):
                        if record.id==query.split('#')[0]:
                            if args.redundant:
                                speciesNameFromOnlineDict[item]=remBadChar(record.description.split('[')[-1][:-1])+'_'+remBadChar(item)
                            else:
                                speciesNameFromOnlineDict[item]=remBadChar(record.description.split('[')[-1][:-1])
                else:
                    speciesNameFromOnlineDict[item]=item+'#not_found'
if args.keep:
    with open(args.out_prefix+'_speciesInfo.txt','w') as asmOut:
        for query in NqueryDict:
            for item in NqueryDict[query]:
                print(item, query.split('#')[0], speciesNameFromOnlineDict[item], sep='\t', file=asmOut)
#print(NqueryDict) #{'WP_019504790.1#1': ['GCF_000332195.1'], 'WP_028108719.1#2': ['GCF_000422645.1'], 'WP_087820443.1#3': ['GCF_900185565.1']}
print('\n'+'>> Input file assessment report: ')
print('\t'+'Discarded protein ids with improper accession : '+str(ne)+'. See "'+args.out_prefix+'_NameError.txt'+'" file for details.')
print('\t'+'Discarded protein ids lacking proper information in RefSeq DB : '+str(nai)+'. See "'+args.out_prefix+'_Insufficient_Info_In_DB.txt'+'" file for details.')
print('\t'+'Remaining queries: '+str(newQ))
def getGeneId(item):
    """Extract the 'GeneID:<n>' token from a comma-delimited CDS Dbxref
    attribute string; returns None when no such token is present."""
    found = re.search('(GeneID:.*?,)', item)
    return found.group(1)[:-1] if found else None
def getGeneId_gene(item):
    """Extract the 'GeneID:<n>' token from a semicolon-delimited gene-feature
    attribute string; returns None when no such token is present."""
    found = re.search('(GeneID:.*;)', item)
    return found.group(1).split(';')[0] if found else None
def lenChecker(List):
    """Decide whether a set of flanking-gene lengths is acceptable.

    :param List: iterable of sequence lengths (ints, possibly None for
        failed lookups), or a falsy value.
    :return: 'delete' when any recorded length exceeds 5000, else 'keep'.

    Bug fix: the original tested ``item==True and item>5000``, which can
    never hold (only the value 1 equals True, and 1>5000 is False), so
    'delete' was unreachable. Any truthy length above 5000 now counts;
    None entries are ignored.
    """
    if not List:
        return 'keep'
    found = 0
    for item in List:
        if item and item > 5000:  # skip None/0 placeholders
            found += 1
    return 'keep' if found == 0 else 'delete'
# Accumulators filled by the per-query flanking-gene scan below.
FoundDict={} #Accession that found in Refseq
FlankFoundDict={} #Accession that have flanking genes
accFlankDict={} #{'WP_092250023.1#1': {0: 'WP_092250023.1+', 1: 'WP_092250020.1+', 2: 'WP_092250017.1-', -1: 'tRNA*+', -2: 'WP_092250026.1-'}}
positionDict={} #Accession as keys:Start and end position as value
speciesDict={} #SpeciesName stored here
queryStrand={} #Strand Information for each query
LengthDict={} #Length of each query
seqDict={} # protein accession -> amino-acid sequence string ('--' placeholders allowed)
desDict={} # protein accession -> description text (before the organism tag)
acc_CGF_Dict={} # query -> 'chromosome/contig id \t assembly id'
treeFastadict={} #Query as key and sequence in fasta as value
querySeqDict={} #For Tree Command
count=0 # progress counter over processed queries
for query in NqueryDict:
count+=1
if args.verbose:
print('\n'+'> '+str(count)+' in process out of '+str(newQ)+' ... '+'\n')
print('Query Name =', query.split('#')[0], '\n')
for item in NqueryDict[query]: #{'WP_019504790.1#1': ['GCF_000332195.1'],NqueryDict
a=0
LineList=[]
geneProt={} # 'gene2504': 'WP_092248795.1', 'gene1943': 'tRNA'
geneChrom={} #'gene1708': 'NZ_MJLP01000030.1'
#item='GCF_'+items[4:]
speciesNameFromDB=speciesNameFromOnlineDict[item]
gff_gz=localDir+item+'.gff.gz'
if os.path.isfile(gff_gz):
LineList=[]
geneProt={} # 'gene2504': 'WP_092248795.1', 'gene1943': 'tRNA'
geneChrom={} #'gene1708': 'NZ_MJLP01000030.1'
with gzip.open(localDir+item+'.gff.gz', 'rb') as gffIn: #Download and read gff.gz
for line in gffIn:
if line.decode('utf-8')[0]!='#':
Line=line.decode('utf-8').rstrip().split('\t')
if Line[2]=='CDS':
if Line[8].split(';')[3][:5]=='Name=': #eliminates pseudo gene as they don't have 'Name='
if query.split('_')[0]=='XP':
if 'GeneID:' in Line[8]:
geneProt[getGeneId(Line[8])]=Line[8].split(';')[3].split('=')[1]
geneChrom[getGeneId(Line[8])]=Line[0]
#print(getGeneId(Line[8]), Line[8].split(';')[3].split('=')[1], Line[0], 'gpc#')
else:
#print(Line[8].split(';')[1].split('=')[1], Line[8].split(';')[3].split('=')[1], Line[0], 'gpc#')
geneProt[Line[8].split(';')[1].split('=')[1]]=Line[8].split(';')[3].split('=')[1]
geneChrom[Line[8].split(';')[1].split('=')[1]]=Line[0]
##print(geneProt)>'GeneID:187667': 'NP_493855.2' NP_417570.1 gene-b3099
if Line[2][-4:]=='gene':
a+=1
if query.split('_')[0]=='XP':
if 'GeneID:' in Line[8]:
newGene=str(a)+'\t'+getGeneId_gene(Line[8])+'\t'+ Line[3]+'\t'+Line[4]+'\t'+ Line[6]+ '\t'+ Line[0]
#print(newGene) #1 GeneID:353377 3747 3909 - NC_003279.8
LineList.append(newGene.split('\t'))
for genDes in Line[8].split(';'):
if 'gene_biotype=' in genDes:
if getGeneId_gene(Line[8]) not in geneProt:
geneProt[getGeneId_gene(Line[8])]=genDes.split('=')[1]+'_'+query.split('#')[1]+'.'+str(random.randint(0,int(s)*2-1))+'*'
else:
newGene=str(a)+'\t'+Line[8].split(';')[0][3:]+'\t'+ Line[3]+'\t'+Line[4]+'\t'+ Line[6]+ '\t'+ Line[0]
LineList.append(newGene.split('\t')) #1 gene3006 10266 10342 - NZ_FPCC01000034.1
for genDes in Line[8].split(';'):
if 'gene_biotype=' in genDes:
if Line[8].split(';')[0][3:] not in geneProt:
geneProt[Line[8].split(';')[0][3:]]=genDes.split('=')[1]+'_'+query.split('#')[1]+'.'+str(random.randint(0,int(s)*2-1))+'*'
geneList=[] ##List of gene names coding same protein (accession), we are taking one from them
for genes in geneProt:
if geneProt[genes]==query.split('#')[0]:
geneList.append(genes)
if len(geneList)>0:
rangeList=[]
for line in LineList:
if geneChrom[geneList[0]]==line[5]:
rangeList.append(int(LineList[LineList.index(line)][0]))
for genes in geneProt:
if genes==geneList[0]:
if query.split('#')[0]==geneProt[genes]:
for line in LineList:
if genes==line[1]:
FoundDict[query]='Yes'
if args.tree:
#print(query, item, seqFasLocal(item,query), tspLocal(item,query.split('#')[0]), seqLocal(item,query.split('#')[0]))
treeFastadict[query]=str(seqFasLocal(item,query))
querySeqDict[query+'|'+tspLocal(item,query.split('#')[0])]=str(seqLocal(item,query.split('#')[0]))
#querySeqDict[query+'|'+remBadChar(spLocal(item,query.split('#')[0]))]=str(seqLocal(item,query.split('#')[0]))
#TAGs
#if speciesNameFromDB!='Nothing' or speciesNameFromDB!='':
#print(speciesNameFromDB, 1)
# speciesDict[query]=speciesNameFromDB
#else:
#print(spLocal(item, query.split('#')[0]), 2)
speciesDict[query]=spLocal(item, query.split('#')[0])
if query not in speciesDict:
if speciesNameFromDB!='Nothing' or speciesNameFromDB!='':
speciesDict[query]=speciesNameFromDB
queryStrand[query]= LineList[LineList.index(line)][4]
positionDict[query]= ("\t".join(map(str,LineList[LineList.index(line)][2:-2])))
LengthDict[query]= int(LineList[LineList.index(line)][3])-int(LineList[LineList.index(line)][2])+1
udsDict={}
dsDict={}
udsDict[0]= query+'+' # O strand
lengthCheck=[]
for x in range(1,int(s)):
if LineList.index(line)-x>=0 and LineList.index(line)-x<len(LineList):
if rangeList.count(int(LineList[LineList.index(line)-x][0]))>0:
acc_CGF_Dict[query]= LineList[LineList.index(line)-x][-1] +'\t'+ item
seqDict[str(geneProt[LineList[LineList.index(line)-x][1]])]=localNone(seqLocal(item, geneProt[LineList[LineList.index(line)-x][1]]))
desDict[geneProt[LineList[LineList.index(line)-x][1]]]=desLocal(item, geneProt[LineList[LineList.index(line)-x][1]])
positionDict[geneProt[LineList[LineList.index(line)-x][1]]+'#'+query.split('#')[1]]= ("\t".join(map(str,LineList[LineList.index(line)-x][2:-2])))
lengthCheck.append(seqFasLenLocal(item,geneProt[LineList[LineList.index(line)-x][1]]+'#'+query.split('#')[1]))
LengthDict[geneProt[LineList[LineList.index(line)-x][1]]+'#'+query.split('#')[1]]= int(LineList[LineList.index(line)-x][3])-int(LineList[LineList.index(line)-x][2])+1
udsDict[int(ups(LineList[LineList.index(line)][4])[0]+str(x))]= geneProt[LineList[LineList.index(line)-x][1]]+'#'+query.split('#')[1]+\
normalize_strand(LineList[LineList.index(line)][4],LineList[LineList.index(line)-x][4])
for y in range(1,int(s)):
if LineList.index(line)+y<len(LineList):
if rangeList.count(int(LineList[LineList.index(line)+y][0]))>0:
acc_CGF_Dict[query]= LineList[LineList.index(line)+y][-1] +'\t'+ item
seqDict[str(geneProt[LineList[LineList.index(line)+y][1]])]=localNone(seqLocal(item,geneProt[LineList[LineList.index(line)+y][1]]))
desDict[geneProt[LineList[LineList.index(line)+y][1]]]=desLocal(item,geneProt[LineList[LineList.index(line)+y][1]])
positionDict[geneProt[LineList[LineList.index(line)+y][1]]+'#'+query.split('#')[1]]= ("\t".join(map(str,LineList[LineList.index(line)+y][2:-2])))
lengthCheck.append(seqFasLenLocal(item,geneProt[LineList[LineList.index(line)+y][1]]+'#'+query.split('#')[1]))
LengthDict[geneProt[LineList[LineList.index(line)+y][1]]+'#'+query.split('#')[1]]= int(LineList[LineList.index(line)+y][3])-int(LineList[LineList.index(line)+y][2])+1
dsDict[int(downs(LineList[LineList.index(line)][4])[0]+str(y))]= geneProt[LineList[LineList.index(line)+y][1]]+'#'+query.split('#')[1]+\
normalize_strand(LineList[LineList.index(line)][4],LineList[LineList.index(line)+y][4])
if lenChecker(lengthCheck)=='keep':
udsDict.update(dsDict)
accFlankDict[query]=udsDict
if query in accFlankDict:
if len(accFlankDict[query])>0:
FlankFoundDict[query]='Yes'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Genes Found', '\n')
else:
FlankFoundDict[query]='No'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Genes Not Found', '\n')
else:
FlankFoundDict[query]='No'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Genes Not Found', '\n')
else:
FlankFoundDict[query]='Yes'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Gene is longer than 7000 amino acid, thus query is discarded', '\n')
else:
if query not in FoundDict:
FlankFoundDict[query]='No'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Genes Not Found', '\n')
else:
FlankFoundDict[query]='No'
FoundDict[query]='No: ProteinID was not found in Genome Assembly'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Genes Not Found', '\n')
else:
FlankFoundDict[query]='No'
FoundDict[query]='No: ProteinID was not found in Genome Assembly'
if args.verbose:
print('\t', query.split('#')[0], 'Report: Flanking Genes Not Found', '\n')
if not args.localGenomeList:
if args.keep:
pass
else:
subprocess.Popen("rm GC*_*.gz", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Flatten the per-query flanking-gene map into one list of accessions; the
# per-accession counts in this list later weight the jackhmmer clusters.
allFlankGeneList=[]
for keys in accFlankDict:
	for item in accFlankDict[keys]:
		allFlankGeneList.append(accFlankDict[keys][item].split('#')[0])
# Write a per-query log stating whether the assembly and flanking genes were found.
flankF=0
with open (args.out_prefix+'_flankgene_Report.log', 'w') as errOut:
	serial=0
	print('#Serial','Query','Assembly_Found', 'FlankingGene_Found', sep='\t', file=errOut)
	for queries in NqueryDict:
		serial+=1
		if queries in FoundDict:
			if queries in FlankFoundDict:
				if FlankFoundDict[queries]=='Yes':
					flankF+=1
					print(str(serial), queries.split('#')[0], FoundDict[queries], FlankFoundDict[queries], sep='\t', file=errOut)
				else:
					print(str(serial), queries.split('#')[0], FoundDict[queries], 'No', sep='\t', file=errOut)
			else:
				# Found in an assembly but no flanking-gene status recorded.
				# NOTE(review): queries absent from FoundDict fall through this
				# if/else silently and are never logged — confirm intended.
				print(str(serial), queries.split('#')[0], 'No', 'No', sep='\t', file=errOut)
# Map each original input accession to the internal query ids derived from it;
# the sentinel value is an *empty list* (str('no').split() == []).
reportDict={}
for query in queryList:
	queryNumList=[]
	for queryNum in NqueryDict:
		if query[0] in queryNum:
			queryNumList.append(queryNum)
	if len(queryNumList)>0:
		reportDict[query[0]]=queryNumList
	else:
		reportDict[query[0]]=str('no').split()
def similarityID (item1, item2):
	"""Report whether two accessions are identical ('Same') or differ ('Changed')."""
	return 'Same' if item1==item2 else 'Changed'
def reporter(i1,i2,i3,i4,i5):
	"""Build the human-readable status sentence for one query.

	i1: original input accession; i2: resolved accession (or 'No');
	i3: 'Same'/'Changed' from similarityID (or 'No'); i4: assembly id (or 'No');
	i5: 'Yes'/'No' flanking-genes-found flag.
	Returns None for combinations not covered below (callers print the value).
	"""
	if i2=='No' and i3=='No' and i4=='No' and i5=='No':
		return i1 + ' Failed : No record for accession ' + i1
	if i2!='No' and i3=='Same' and i4!='No' and i5=='No':
		return i1 + ' is a valid NCBI protein accession but Discarded : No Flanking Gene was found for ' + i1 + ' in Assembly ID '+ i4
	if i2!='No' and i3=='Same' and i4!='No' and i5!='No':
		return i1 + ' is valid as a NCBI protein accession and reported in Assembly ID '+ i4
	if i2!='No' and i3=='Changed' and i4!='No' and i5=='No':
		# Fixed: this message previously read '...for <acc>in Assembly ID' (missing space).
		return i1 + ' is invalid NCBI protein accession therefore converted to identical RefSeq sequence with accession '+ i2 + ' but Discarded : No Flanking Gene was found for ' + i2 + ' in Assembly ID '+ i4
	if i2!='No' and i3=='Changed' and i4!='No' and i5!='No':
		return i1 + ' is invalid NCBI protein accession therefore converted to identical RefSeq sequence with accession '+ i2 + ' which is reported in Assembly ID '+ i4
# Write one status sentence per input accession, then emit the tree fasta and
# the pooled flanking-gene fasta used as the clustering database.
qcount=0
discardedGene=0
with open (args.out_prefix+'_QueryStatus.txt', 'w') as sumOut:
	print('#Serial', 'Status', sep='\t', file=sumOut)
	for query in queryList:
		#print('query',query)
		qcount+=1
		for item in reportDict[query[0]]:
			#print('item',item)
			if item in FlankFoundDict:
				if item in accFlankDict:
					if FlankFoundDict[item]=='Yes':
						print(str(qcount), reporter(query[0], item.split('#')[0], similarityID(query[0], item.split('#')[0]), ''.join(NqueryDict[item]), 'Yes'), sep='\t', file=sumOut)
					else:
						print(str(qcount), reporter(query[0], item.split('#')[0], similarityID(query[0], item.split('#')[0]), ''.join(NqueryDict[item]), 'No'), sep='\t', file=sumOut)
				else:
					# Flanking genes existed but the window was dropped (over-long gene).
					if FlankFoundDict[item]=='Yes':
						discardedGene+=1
						print(str(qcount), query[0]+' is a valid NCBI protein accession but Discarded : Flanking gene with a length more than 5000 amino acid detected.', sep='\t',file=sumOut)
					else:
						print(str(qcount), reporter(query[0], item.split('#')[0], similarityID(query[0], item.split('#')[0]), ''.join(NqueryDict[item]), 'No'), sep='\t', file=sumOut)
			else:
				# reportDict sentinel: the accession had no record at all.
				print(str(qcount), reporter(query[0], 'No', 'No', 'No', 'No'), sep='\t', file=sumOut)
print('\n'+'>> Flanking Genes found : '+str(flankF)+' out of remaining '+str(serial)+'. See "'+args.out_prefix+'_flankgene_Report.log'+'" file for details.'+'\n'+'\n')
# Nothing to cluster — stop the whole run here.
if int(flankF)==0:
	print('>> No Flanking Genes found, please update your accession list')
	sys.exit()
else:
	pass
if args.tree: #Generate fasta file for making phylogenetic Tree
	with open(args.out_prefix+'_tree.fasta', 'w') as treeOut:
		for queries in NqueryDict:
			if queries in treeFastadict:
				print(treeFastadict[queries], file=treeOut)
# Reconcile seqDict and desDict so every flanking gene has both a sequence and
# a description; the missing half is fetched via seq_from_wp.
if len(seqDict)!=len(desDict):
	if len(seqDict)>len(desDict):
		for seqids in sorted(seqDict):
			if seqids not in desDict:
				desDict[seqids]=des_check(str(seq_from_wp(seqids).split('\t')[0]))
	else:
		for seqids in sorted(desDict):
			if seqids not in seqDict:
				seqDict[seqids]=str(seq_from_wp(seqids).split('\t')[1])
else:
	if args.verbose:
		print ('Description collected for Flanking Genes!')
# Dump all flanking-gene sequences; '--' marks entries without a sequence.
b=0
with open (args.out_prefix+'_flankgene.fasta'+'_cluster_out', 'w') as fastaNew:
	for seqids in sorted(seqDict):
		if seqDict[seqids]!='--':
			b+=1
			print('>'+seqids+'|'+desDict[seqids]+'\n'+seqDict[seqids], file=fastaNew)
if args.verbose:
	print ('Total Flanking genes found = '+ str(b))
print('\n>> Now running Jackhmmer and clustering flanking genes\n')
# Run jackhmmer with every flanking gene as a query against the pooled
# flanking-gene fasta, then single-linkage-merge the hit lists into clusters.
infilename=args.out_prefix+'_flankgene.fasta'+'_cluster_out'
directory = args.out_prefix+'_flankgene.fasta'+'_cluster_out_individuals'
if not os.path.exists(directory):
	os.makedirs(directory)
infile=open(args.out_prefix+'_flankgene.fasta'+'_cluster_out',"r").read()
al=infilename+"_"+iters+"_"+evthresh+"_jackhits.tsv"
outacclists=open(al,"w")
percentileJack=0
i=1
for seqids in sorted(seqDict): #Running Jackhmmer for finding homologs
	if seqDict[seqids]!='--':
		percentileJack+=1
		if args.verbose:
			# Progress every 5 queries, plus a completion banner.
			if percentileJack % 5 == 0:
				print('\t'+'>>> '+str(round(int(percentileJack)*100/b))+'%'+' Completed...'+'('+str(percentileJack)+'/'+str(b)+')')
			if percentileJack % b == 0:
				print('\t'+'>>> Completed ' +'\n')
		# One-sequence query file for this flanking gene.
		i_f=directory+"/"+str(i)+".txt"
		indivfile=open(i_f,"w")
		indivfile.write(">"+seqids+'\n'+seqDict[seqids])
		indivfile.close()
		# NOTE(review): command is built by string interpolation and run via
		# os.system — inputs are program-generated paths, but a list-based
		# subprocess.run would be safer if prefixes can contain shell chars.
		if args.cpu:
			command="jackhmmer --cpu %s -N %s --incE %s --incdomE %s --tblout %s/tblout%s.txt %s %s>%s/out%s.txt" %(core, iters, evthresh, evthresh, directory, str(i), i_f, infilename, directory, str(i))
		else:
			command="jackhmmer -N %s --incE %s --incdomE %s --tblout %s/tblout%s.txt %s %s>%s/out%s.txt" %(iters, evthresh, evthresh, directory, str(i), i_f, infilename, directory, str(i))
		os.system(command)
		# Parse tblout: keep hits whose inclusion flag (column 18) is "1".
		tbl=open(directory+"/tblout"+str(i)+".txt").read()
		part=tbl.split("----------\n")[1].split("\n#")[0]
		lines=part.splitlines()
		acclist=[]
		for line in lines:
			lineList=line.split()
			if len(lineList)>17:
				inc=line.split()[17]
				acc=line.split('|')[0]
				if inc=="1":
					acclist.append(acc)
		outacclists.write(str(i)+"\t"+str(acclist)+"\n")
		i=i+1
outacclists.close()
# Re-read the jackhits file; d maps a running index -> list of hit accessions.
raw=open(al).read().strip()
d={}
index=0
for line in raw.split("\n"):
	if line.split("\t")[1]!='[]':
		index+=1
		actxt=line.split("\t")[1].replace(",","").replace("[","").replace("]","").replace("'","")
		actlist=actxt.split(" ")
		d[index]=(actlist)
# Single-linkage merge: any two lists sharing an accession are unioned into the
# later slot, emptying the earlier (O(n^2) over lists, fine at this scale).
i=1
while i<len(d)+1: #use i and j to iterate through the combinations
	list1=d[i]
	j=i+1
	while j<len(d)+1:
		list2=d[j]
		#print "i", i, list1, " vs ",
		#print "j", j, list2
		if set(list1) & (set(list2)): # if there is an intersection
			#print "yes there is", list1, list2
			union=list(set(list2) | set(list1))
			d[j]=union
			d[i]=[] #...and empty the redundant list
		j=j+1
	i=i+1
#d : {1: ['WP_000153877.1'], 2: [], 3: ['WP_000291520.1'], 4: ['WP_000342211.1'], 5: [],... 30: ['WP_055032751.1', 'WP_001246052.1']}
# Rank clusters by how many times their members occur among all flanking genes.
trueAccessionCount={}
for keys in d:
	numbers=[]
	for item in d[keys]:
		numbers.append(allFlankGeneList.count(item))
	trueAccessionCount[(';'.join(map(str,d[keys])))]=sum(numbers)
#trueAccessionCount: 'WP_001229260.1;WP_001229255.1': 4
odtrue=OrderedDict(sorted(trueAccessionCount.items(), key= lambda item:item[1],reverse=True))
familyNumber=0
with open(infilename+"_"+iters+"_"+evthresh+"_clusters.tsv","w") as clusOut:
	for k, v in odtrue.items():
		#print (k.split(';'), odtrue[k], v)
		if len(k.split(';'))>0 and v>0:
			familyNumber+=1
			print(str(familyNumber),str(odtrue[k]),k, sep='\t', file=clusOut)
# Description table for clusters that occur more than once.
outfile_des=open(infilename+"_"+iters+"_"+evthresh+"_outdesc.txt","w")
inf=open(infilename+"_"+iters+"_"+evthresh+"_clusters.tsv","r")
acclists=inf.read().splitlines()
for line in acclists:
	acclist=line.split("\t")[2].split(";")
	familyAssignedValue=line.split("\t")[0]
	if int(line.split("\t")[1])>1:
		for acc in acclist:
			outfile_des.write(familyAssignedValue+'('+str(allFlankGeneList.count(acc))+')'+"\t"+acc+"\t"+desDict[acc]+"\n")
		outfile_des.write ("\n\n")
# Load cluster assignments back in: accession -> family number (0 = singleton).
familyDict={} # Accession:Assigned family Number from Jackhammer
with open(args.out_prefix+'_flankgene.fasta_cluster_out_'+iters+'_'+evthresh+'_clusters.tsv', 'r') as clusterIn:
	for line in clusterIn:
		if line[0]!='#':
			line=line.rstrip().split('\t')
			if int(line[1])>1:
				for item in (line[2].split(';')):
					familyDict[item]=int(line[0])
			else:
				familyDict[line[2]]=0
familynum=[]
for acc in familyDict:
	familynum.append(familyDict[acc])
# Reserve four sentinel family numbers above the real ones:
# center  -> the query gene itself
# noProt  -> gene without a protein product (ids ending in '*')
# noProtP -> same, but 'ps...' (pseudo) biotype
# noColor -> anything else not clustered
center=int(max(familynum))+1
noProt=int(max(familynum))+2
noProtP=int(max(familynum))+3
noColor=int(max(familynum))+4
for ids in LengthDict:
	# Trailing '*' is the marker attached when a gene had no protein (geneProt values).
	if ids.split('#')[0][-1]=='*':
		if ids.split('#')[0][:2].lower()=='ps':
			familyDict[ids.split('#')[0]]=noProtP
		else:
			familyDict[ids.split('#')[0]]=noProt
	if ids.split('#')[0] not in familyDict:
		if ids in NqueryDict:
			familyDict[ids.split('#')[0]]=center
		else:
			familyDict[ids.split('#')[0]]=noColor
# Fixed colors for the sentinel families (query, pseudo, unclustered).
color={}
color[noColor]='#ffffff'
color[center]='#000000'
color[noProt]='#f2f2f2'
color[noProtP]='#f2f2f3'
colorDict={} #Assigned family Number from Jackhammer : colorcode
# Colors whose meaning is reserved above; random family colors must avoid them.
reservedColors={'#ffffff', '#000000', '#f2f2f2', '#f2f2f3'}
for families in set(familynum):
	if families == 0:
		colorDict[families]='#ffffff'
	else:
		# Redraw until the color is not reserved. The previous code compared
		# four *separate* random_color() calls in an 'or' chain and then
		# assigned yet another call, so the check never applied to the value
		# stored, and a family could even be left with no color at all.
		newColor=random_color()
		while newColor in reservedColors:
			newColor=random_color()
		colorDict[families]=newColor
colorDict.update(color)
maxs=(int(s)-1) # required to calculate border size of postscript output
mins=maxs-(maxs*2) # required to calculate border size of postscript output
# Write the operon table: one row per flanking gene, positions rebased to the
# first gene of the window and oriented so the query reads left-to-right.
if not args.tree_order:
	nPos=[]
	pPos=[]
	with open(args.out_prefix+'_operon.tsv', 'w') as opOut:
		for queries in accFlankDict:
			for items in sorted(accFlankDict[queries]):
				if queryStrand[queries]=='+':
					ids=accFlankDict[queries][items][:-1]
					lengths=LengthDict[accFlankDict[queries][items][:-1]]
					species=queries+'|'+remBadChar(speciesDict[queries])
					qStrand=queryStrand[queries]
					nStrand=accFlankDict[queries][items][-1]
					family=familyDict[accFlankDict[queries][items][:-1].split('#')[0]]
					startPos=int(positionDict[accFlankDict[queries][0][:-1]].split('\t')[0])-1
					start=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[0])
					end=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[1])
					if queries in acc_CGF_Dict:
						info=acc_CGF_Dict[queries]
					else:
						info='not_found'+'\t'+'not_found'+'\t'+'not_found'
					print(species, lengths, qStrand, nStrand, family, start-startPos, end-startPos, start, end, ids, info, sep='\t', file=opOut)
					nP=start-startPos
					pP=end-startPos
					nPos.append(nP)
					pPos.append(pP)
				else:
					# Minus-strand query: mirror coordinates around the window
					# start so the drawing reads in the query's direction.
					ids=accFlankDict[queries][items][:-1]
					lengths=LengthDict[accFlankDict[queries][items][:-1]] #c2
					species=queries+'|'+remBadChar(speciesDict[queries])
					qStrand=queryStrand[queries]
					nStrand=accFlankDict[queries][items][-1]
					family=familyDict[accFlankDict[queries][items][:-1].split('#')[0]]
					startPos=int(positionDict[accFlankDict[queries][0][:-1]].split('\t')[1])+1
					start=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[1])
					end=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[0])
					if queries in acc_CGF_Dict:
						info=acc_CGF_Dict[queries]
					else:
						info='not_found'+'\t'+'not_found'+'\t'+'not_found'
					print(species, lengths, qStrand, nStrand, family, startPos-start, startPos-end, end, start, ids, info, sep='\t', file=opOut)
					nP=startPos-start
					pP=startPos-end
					nPos.append(nP)
					pPos.append(pP)
			print('\n\n', file=opOut)
# Size the scrollable tkinter canvas from the widest window and query count.
# NOTE(review): nPos/pPos are only populated when args.tree_order is false —
# confirm the tree_order path defines them before this point, else max() fails.
windowMost=round(((max(pPos)+abs(min(nPos))+1)*4)/100)
widthM=(windowMost*3)+500
heightM=int(newQ)*20
canvas = Canvas(master, width=widthM,height=heightM,background='white', scrollregion=(0,0,round(widthM*2.5),round(heightM*2.5)))
hbar=Scrollbar(master,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=canvas.xview)
vbar=Scrollbar(master,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=canvas.yview)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
def operonFamily(item):
	"""Label drawn inside a gene arrow: real families keep their number,
	sentinel families (singleton 0, query, pseudo, unclustered) are blank.

	The 0 test comes first so the common singleton case never needs the
	sentinel globals (center/noProt/noProtP/noColor) to be resolvable."""
	if item==0 or item==center or item==noProt or item==noProtP or item==noColor:
		return ' '
	return item
# Draw each operon onto the canvas (one row per query, one arrow per gene,
# colored by family) and export to postscript; then re-parse operon.tsv into
# per-query lookup tables used by the TA-pair detection below.
eg1=open(args.out_prefix+'_operon.tsv','r').read()
egs=eg1.split("\n\n\n\n")
line_pos_y=0
for eg in egs:
	if eg!='':
		coln=0
		entries=eg.splitlines()
		ndoms=len(entries)
		ptnstats=entries[0].split("\t") #c2
		org=ptnstats[0][:ptnstats[0].index('|')]+ptnstats[0][ptnstats[0].index('|'):].replace('_',' ')
		textspace=widthM/2
		line_pos_y=line_pos_y+16-round(postscriptSize(newQ))
		half_dom_height=5-round(postscriptSize(newQ))
		text = canvas.create_text(textspace/2-textspace/8,line_pos_y, text=org, fill="#404040", font=myFont12)
		for entry in entries:
			items=entry.split("\t")
			# Columns 5/6 hold window-relative start/end; scale is 4 px per 100 nt.
			aln_start=round(int(items[5])*4/100)
			aln_end=round(int(items[6])*4/100)
			strandType=items[3]
			dom1_name=int(items[4])
			dom1_len=(aln_end-aln_start)
			oL80=round(dom1_len*80/100)
			dom1_start=aln_start+textspace
			dom1_end=dom1_len+dom1_start
			# Arrow head points right for '+' strand, left for '-'.
			if strandType=='+':
				rect = canvas.create_polygon(dom1_start, line_pos_y+half_dom_height, dom1_start, line_pos_y-half_dom_height,dom1_start+oL80, line_pos_y-half_dom_height, dom1_end, line_pos_y, dom1_start+oL80, line_pos_y+half_dom_height,fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
			else:
				rect = canvas.create_polygon(dom1_end-oL80, line_pos_y+half_dom_height, dom1_start, line_pos_y, dom1_end-oL80, line_pos_y-half_dom_height,dom1_end, line_pos_y-half_dom_height, dom1_end, line_pos_y+half_dom_height, fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
			textd1 = canvas.create_text(dom1_start+(dom1_len/2),line_pos_y, text=operonFamily(dom1_name), font=myFont7)
			coln=coln+1
retval = canvas.postscript(file=args.out_prefix+"_flankgenes.ps", height=heightM, width=widthM, colormode="color")
##TAGs
egsList=[]
queryNameList=set()
family_Query_set=set()
querySPDict={}
querySPSDict={}
fgInfoDict={}# WP_090521055.1#293|9:7:0 WP_090521058.1#+ 0 954:1322
for Line in egs:
	if Line!='':
		FlankSet=Line.split('\n')
		egsList.append(FlankSet)
		fgTotal=len(FlankSet)
		count=0
		for item in FlankSet:
			count+=1
			queryACC=item.split('\t')[0].split('#')[0]
			querySerial=item.split('\t')[9].split('#')[1]
			fgACC=item.split('\t')[9].split('#')[0]
			strandPN=str(item.split('\t')[3])
			fgFamily=int(item.split('\t')[4])
			fgStart=int(item.split('\t')[5])
			fgEnd=int(item.split('\t')[6])
			##chayanChange
			if queryACC==fgACC:
				family_Query_set.add(fgFamily)
			# queryACCnum: 1-based slot of the query inside its own window, '0' otherwise.
			queryACCnum=''
			if queryACC==fgACC:
				queryACCnum=str(count)
			else:
				queryACCnum=str(0)
			queryNameList.add(str(queryACC+'#'+str(querySerial)))
			fgInfoDict[str(queryACC+'#'+str(querySerial)+'|'+str(fgTotal)+':'+str(count)+':'+str(queryACCnum))]= fgACC+'#'+strandPN+'\t'+str(fgFamily)+'\t'+str(fgStart)+':'+str(fgEnd)
			querySPDict[str(queryACC+'#'+str(querySerial)+'|'+str(fgTotal)+':'+str(count)+':'+str(queryACCnum))]=item.split('\t')[0]
			querySPSDict[str(queryACC+'#'+str(querySerial))]=item.split('\t')[0]
# Per query ('acc#serial'): ordered accessions, strands, families and positions
# of its flanking genes, reconstructed from fgInfoDict.
queryNameListDict={}
strandListDict={}
AccessionNameListDict={}
FamListDict={}
posListDict={}
for item in queryNameList:
	# NOTE: rebinds the name 'queryNameList' to a fresh local list; the for
	# statement keeps iterating the original set object unharmed.
	queryNameList=[]
	AccessionNameList=[]
	strandList=[]
	FamList=[]
	posList=[]
	for query in fgInfoDict:
		if item==query.split('|')[0]:
			queryNameList.append(query)
			strandList.append(fgInfoDict[query].split('\t')[0].split('#')[1])
			FamList.append(fgInfoDict[query].split('\t')[1])
			posList.append(fgInfoDict[query].split('\t')[2])
			AccessionNameList.append(fgInfoDict[query].split('\t')[0].split('#')[0])
	queryNameListDict[item]=queryNameList
	strandListDict[item]=strandList
	AccessionNameListDict[item]=AccessionNameList
	FamListDict[item]=FamList
	posListDict[item]=posList
def continuousVerifyCheck(item):
	"""If the integers in *item* form one unbroken run (order ignored), return
	them as a list of strings in their original order; otherwise return None
	implicitly."""
	joined=';'.join(map(str,item))
	numbers=[int(piece) for piece in joined.split(';')]
	#print (sorted(listC), list(range(min(listC), max(listC)+1)))
	if sorted(numbers)==list(range(min(numbers), max(numbers)+1)):
		return joined.split(';')
# For every query, grow a neighbour window (up to +/-2 genes) around it: a
# neighbour is kept when its intergenic gap is <= gappyness and it belongs to
# a real (non-zero) family. Contiguous windows become TA candidates; failures
# are filed into the *_dis dictionaries with a reason.
sameStrandWindowDict={}
noToxDict_dis={}
startContig_dis={}
endContig_dis={}
for keys in queryNameListDict:
	query=keys.split('#')[0]
	mainPoint=AccessionNameListDict[keys].index(query)
	LengthOfFlankFound=len(AccessionNameListDict[keys])
	if mainPoint!=0: #Not starting of Contig
		if mainPoint!=LengthOfFlankFound-1: #Not end of Contig
			numList=[] #Window size of FlankGenes
			for num in range(LengthOfFlankFound):
				numList.append(num)
			sameStrand=[]
			for x in range(LengthOfFlankFound):
				if strandListDict[keys][mainPoint]==strandListDict[keys][x]:
					sameStrand.append(x)
			diffStrand=[]
			for y in range(LengthOfFlankFound):
				if strandListDict[keys][mainPoint]!=strandListDict[keys][y]:
					diffStrand.append(y)
			#print(query, mainPoint, LengthOfFlankFound, sameStrand, diffStrand)
			sameStrandWindowD=[]
			sameStrandWindowU=[]
			if mainPoint in sameStrand:
				sameStrandWindowD.append(mainPoint)
				# Downstream neighbour (+1); (+2) is tried only if (+1) passed.
				if mainPoint+1 in sameStrand or mainPoint+1 in diffStrand:
					GapStart=int(posListDict[keys][mainPoint].split(':')[1])
					GapEnd=int(posListDict[keys][mainPoint+1].split(':')[0])
					difference=(GapEnd-GapStart)-1
					if int(gappyness)>int(difference) or int(gappyness)==int(difference):
						if int(FamListDict[keys][mainPoint+1])!=0:#and int(FamListDict[keys][mainPoint+1])!=int(FamListDict[keys][mainPoint]):
							sameStrandWindowD.append(mainPoint+1)
							#print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint], posListDict[keys][mainPoint+1], difference)
							if mainPoint+2 in sameStrand or mainPoint+2 in diffStrand:
								GapStart2=int(posListDict[keys][mainPoint+1].split(':')[1])
								GapEnd2=int(posListDict[keys][mainPoint+2].split(':')[0])
								difference2=(GapEnd2-GapStart2)-1
								if int(gappyness)>int(difference2) or int(gappyness)==int(difference2):
									if int(FamListDict[keys][mainPoint+2])!=0:
										sameStrandWindowD.append(mainPoint+2)
										#print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint+1], posListDict[keys][mainPoint+2], difference2)
				# Upstream neighbour (-1); (-2) is tried only if (-1) passed.
				if mainPoint-1 in sameStrand or mainPoint-1 in diffStrand:
					GapStart4=int(posListDict[keys][mainPoint-1].split(':')[1])
					GapEnd4=int(posListDict[keys][mainPoint].split(':')[0])
					difference4=(GapEnd4-GapStart4)-1
					if int(gappyness)>int(difference4) or int(gappyness)==int(difference4):
						if int(FamListDict[keys][mainPoint-1])!=0 :#and int(FamListDict[keys][mainPoint-1])!=int(FamListDict[keys][mainPoint]):
							sameStrandWindowU.append(mainPoint-1)
							#print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint-1], posListDict[keys][mainPoint], difference4)
							if mainPoint-2 in sameStrand or mainPoint-2 in diffStrand:
								GapStart5=int(posListDict[keys][mainPoint-2].split(':')[1])
								GapEnd5=int(posListDict[keys][mainPoint-1].split(':')[0])
								difference5=(GapEnd5-GapStart5)-1
								if int(gappyness)>int(difference5) or int(gappyness)==int(difference5):
									if int(FamListDict[keys][mainPoint-2])!=0:
										sameStrandWindowU.append(mainPoint-2)
										#print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint-2], posListDict[keys][mainPoint-1], difference5)
				# Keep the window only when it is contiguous and longer than one.
				# Dict key carries the query's index within the kept window.
				if continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)):
					if len(continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)))>1:
						sameStrandWindowDict[keys+':'+str(continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)).index(str(mainPoint)))]=continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD))
						#print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys], sameStrand, continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)), FamListDict[keys])
				else:
					noToxDict_dis[keys]='No Cluster found or '+ 'Gap >'+str(gappyness)+' or Strand Difference'
		else:
			endContig_dis[keys]='Contig Error: Query is in the end of Contig'
	else:
		startContig_dis[keys]='Contig Error: Query is in the start of contig'
# Family numbers of each kept window, in window order.
OperonFamilyDict={}
for keys in sameStrandWindowDict:
	opFamList=[]
	for item in sameStrandWindowDict[keys]:
		opFamList.append(str(FamListDict[keys[:-2]][int(item)]))
	OperonFamilyDict[keys]=opFamList
#print(OperonFamilyDict, 'OperonFamilyDict') #'WP_127015184.1#37:1': ['36', '47', '4', '35']
# Families that contain a query gene (index after ':' points at the query).
queryFamilySet=set()
for keys in OperonFamilyDict:
	queryFamilySet.add(int(OperonFamilyDict[keys][int(keys[-1:])]))
	#print(keys, OperonFamilyDict[keys])
# Enumerate adjacent pairs (i, i+1) that involve the query position; pairs of
# two query-family genes ('toxin-toxin') and non-coding partners are discarded.
aattDict_dis={}
nocoding_dis={}
probableTApairFamDict={}
probableTApairFamPosDict={}
for keys in OperonFamilyDict:
	taPairList=[]
	taPairPosList=[]
	for i in range (len(OperonFamilyDict[keys])-1):
		for j in range (1,len(OperonFamilyDict[keys])):
			if j==i+1:
				if i==int(keys[-1]) or j==int(keys[-1]):
					if int(OperonFamilyDict[keys][i]) in queryFamilySet and int(OperonFamilyDict[keys][j]) in queryFamilySet:
						aattDict_dis[keys.split(':')[0]]='Pair Error: Paired with homolog cluster'
						pass #if condition like 'Toxin(518)-Toxin(518)' we discard
					else:
						if int(OperonFamilyDict[keys][i]) in queryFamilySet or int(OperonFamilyDict[keys][j]) in queryFamilySet: #if one of them in queryFamily
							if int(OperonFamilyDict[keys][i])<=max(queryFamilySet) and int(OperonFamilyDict[keys][j])<=max(queryFamilySet): #not pseudo/noncoding rna
								probableTA=str(OperonFamilyDict[keys][i])+'.'+str(OperonFamilyDict[keys][j])
								probableTAPos=str(i)+'.'+str(j)
								taPairList.append(probableTA)
								taPairPosList.append(probableTAPos)
							else:
								nocoding_dis[keys.split(':')[0]]='Pair Error: Non coding RNA'
	#else:
	#print(keys)
	if len(taPairList)>0:
		probableTApairFamDict[keys]=taPairList
		probableTApairFamPosDict[keys]=taPairPosList
	#else:
	#print(keys, OperonFamilyDict[keys], taPairList)
# Enumerate adjacent triplets containing a query family: candidate 3-gene operons.
probableOperonFamDict={}
probableOperonFamSet=set()
probableOperonList=[]
for keys in OperonFamilyDict:
	operonList=[]
	if len(OperonFamilyDict[keys])>2:
		for i in range (len(OperonFamilyDict[keys])-2):
			for j in range (1,len(OperonFamilyDict[keys])-1):
				for k in range (2,len(OperonFamilyDict[keys])):
					if j==i+1 and k==j+1:
						#we need to get operon that contains query pair
						if int(OperonFamilyDict[keys][i]) in queryFamilySet or int(OperonFamilyDict[keys][j]) in queryFamilySet \
						or int(OperonFamilyDict[keys][k]) in queryFamilySet :
							probableOperon=str(OperonFamilyDict[keys][i])+'.'+str(OperonFamilyDict[keys][j])+'.'+\
							str(OperonFamilyDict[keys][k])
							operonList.append(probableOperon)
							probableOperonList.append(probableOperon)
							probableOperonFamSet.add(probableOperon)
	if len(operonList)>0:
		probableOperonFamDict[keys]=operonList
	else:
		#print(keys)
		noToxDict_dis[keys.split(':')[0]]='No Cluster found or '+ 'Gap >'+str(gappyness)+' or Strand Difference'
def pseudoCheck(item):
	"""Return 'pseudoFound' when the dotted family string (e.g. '12.1658.9')
	contains a family number above the coding-gene range (pseudogene /
	non-coding sentinels sit above max(queryFamilySet)); otherwise None."""
	itemSplit=item.split('.')
	# Compare numerically: max() over the raw strings was lexicographic, so
	# e.g. max('9','1658') picked '9' and pseudo families could slip through.
	if max(int(fam) for fam in itemSplit)>max(queryFamilySet):
		return 'pseudoFound'
# Keep candidate 3-gene operons (pseudo-free) that occur in more than one query;
# pairs fully contained in such a conserved long operon are treated as false
# positives and filtered out below.
operonOccuranceDict={} #operons that are conserved more than one time
trueOperonOccuranceDict={}
for item in probableOperonFamSet:
	if pseudoCheck(item)!='pseudoFound': #remove pseudogenes/noncoding containing operons
		keyset=set()
		for keys in probableOperonFamDict:
			if item in probableOperonFamDict[keys]:
				keyset.add(keys.split(':')[0])
		if len(keyset)>1: #operons that are conserved more than one time
			operonOccuranceDict[item]=keyset
filteredTApairPosFamily={}
filteredTApairFamily={}
filteredTApairSet=set()
for items in probableTApairFamDict:
	Fpset=set() #rev1
	for item in probableTApairFamDict[items]:
		for operons in operonOccuranceDict:
			taItem=item.split('.')
			operonItem1=operons.split('.')[0:2]
			operonItem2=operons.split('.')[1:3]
			#rev1 operonItem3=operons.split('.')[2:4]
			# Pair matching either half of a conserved operon is a false positive.
			if taItem==operonItem1 or taItem==operonItem2: #rev1 or taItem==operonItem3:
				if items.split(':')[0] in operonOccuranceDict[operons]:
					Fpset.add(items)
					#print(items, probableTApairFamPosDict[items], probableTApairFamDict[items], operons, probableOperonList.count(operons), 'FP')
		if items not in Fpset:
			#print(items, probableTApairFamPosDict[items], probableTApairFamDict[items], operons, probableOperonList.count(operons), 'TP')
			filteredTApairFamily[items]=probableTApairFamDict[items] #'WP_057732687.1#38:2': ['2.518', '518.9'],
			filteredTApairPosFamily[items]=probableTApairFamPosDict[items] #'WP_015310665.1#174:1': ['0.1', '1.2']
			for elements in probableTApairFamDict[items]:
				filteredTApairSet.add(elements)
def getTA_ACCpair(pair, accessionSerial):
	"""Resolve a family pair (e.g. '12.1658') for one query window back to
	concrete accessions and spacing.

	pair: dotted family pair string; accessionSerial: 'acc#serial:queryIdx' key.
	Returns 'acc1|acc2\\tgap\\tfam1|fam2' where gap is the intergenic distance
	(may be negative for overlapping genes), or None when the key/pair is not
	among the filtered candidates — callers must tolerate None.
	"""
	if accessionSerial in filteredTApairFamily:
		if accessionSerial in probableTApairFamDict:
			for pairVar in probableTApairFamDict[accessionSerial]:
				if pairVar==pair:
					splitPair=pair.split('.')
					# Position of this pair inside the window, e.g. '0.1'.
					pairIndex=probableTApairFamPosDict[accessionSerial][probableTApairFamDict[accessionSerial].index(pairVar)]
					#print('new',pairVar, pairIndex)#new 12.1658 0.1
					# Map window slots back to flanking-gene list indices.
					index1=int(sameStrandWindowDict[accessionSerial][int(pairIndex.split('.')[0])])
					index2=int(sameStrandWindowDict[accessionSerial][int(pairIndex.split('.')[1])])
					accession1=AccessionNameListDict[accessionSerial.split(':')[0]][index1]
					accession2=AccessionNameListDict[accessionSerial.split(':')[0]][index2]
					#print(index1,index2,accession1, accession2)
					# Gap between end of gene 1 and start of gene 2.
					position1=posListDict[accessionSerial.split(':')[0]][index1].split(':')[1]
					position2=posListDict[accessionSerial.split(':')[0]][index2].split(':')[0]
					difference=(int(position2)-int(position1))-1
					return accession1+'|'+accession2+'\t'+str(difference)+'\t'+splitPair[0]+'|'+splitPair[1]
def descriptionFromTA(item):
	"""Map each accession in an 'acc1|acc2\\t...' pair string to its stored
	description (skipping unknown accessions) and join them with '|'."""
	accessions=item.split('\t')[0].split('|')
	descriptionList=[desDict[acc] for acc in accessions if acc in desDict]
	return '|'.join(map(str,descriptionList))
# For each surviving family pair: which query windows contain it, and in how
# many distinct queries it occurs (accessionSet2 drops the window-index suffix).
trueTApairCount={}
trueTApairAccession={}
for items in filteredTApairSet:
	accessionSet=set()
	accessionSet2=set()
	for keys in filteredTApairFamily:
		if items in filteredTApairFamily[keys]:
			accessionSet.add(keys)
			accessionSet2.add(keys.split(':')[0])
	trueTApairAccession[items]=accessionSet #518.50 {'WP_010875036.1#440:2', 'WP_010869490.1#706:3'}
	trueTApairCount[items]=len(accessionSet2)
def contigCheck(accs, accPair, strandDict, accsInSPList):
	"""Classify the partner of *accs* inside a predicted pair.

	accs: query accession; accPair: 'acc1|acc2'; strandDict: strand list
	parallel to accsInSPList (the ordered flanking-gene accessions).
	Returns 'P'  when the partner sits strictly inside the window,
	        'Fc' when it sits at the contig edge (first/last slot),
	        'Fs' when it is on the opposite strand,
	        'Fx' when accs is not part of accPair at all."""
	if accs not in accPair:
		return 'Fx'
	partners=accPair.split('|')
	partners.remove(accs)
	partner=partners[0]
	if strandDict[accsInSPList.index(accs)]!=strandDict[accsInSPList.index(partner)]:
		return 'Fs'
	# Interior slots exclude the first and last window positions.
	if accsInSPList.index(partner) in range(1, len(accsInSPList)-1):
		return 'P'
	return 'Fc'
# Walk every surviving TA-like pair; keep those whose partner passes the
# contig/strand check ('P'), otherwise record why the query was discarded.
discard_dis={}
twoThree_List=[]
tsvAccessionSet=set()
ta_Accession=set()
operonTA_TXT=[]
for item in sorted(trueTApairAccession):
	for accs in trueTApairAccession[item]:
		queryKey=accs.split(':')[0]          # 'acc#serial'
		queryAcc=accs.split('#')[0]          # bare accession
		pairInfo=getTA_ACCpair(item, accs)   # 'acc1|acc2\tgap\tfam1|fam2'
		pairAccs=pairInfo.split('\t')[0]
		# One verdict per (pair, window); the original recomputed this up to
		# four times per iteration with identical arguments.
		verdict=contigCheck(queryAcc, pairInfo.split('\t')[0], strandListDict[accs[:-2]], AccessionNameListDict[accs[:-2]])
		if verdict=='P':
			if queryAcc in pairAccs:
				pairQuery=queryKey+'\t'+pairAccs
				twoThree_List.append(pairQuery.split('\t'))
				tsvAccessionSet.add(querySPSDict[queryKey])
				ta_Accession.add(queryKey)
				operonTAInfo=str(querySPSDict[queryKey])+'\t'+str(queryAcc)+'\t'+str(len(item.split('.')))+'\t'+str(pairInfo)+'\t'+str(trueTApairCount[item])+'/'+str(len(querySPSDict))+'\t'+str(round(int(trueTApairCount[item])*100/len(querySPSDict),2))
				operonTA_TXT.append(operonTAInfo.split('\t'))
		elif queryAcc in pairAccs:
			# Record a discard reason only when no earlier stage already did.
			# Fixed: the original wrote 'x not in startContig_dis and
			# endContig_dis and ...', which membership-tested only the first
			# dict and truthiness-tested the rest.
			if all(queryKey not in d for d in (startContig_dis, endContig_dis, noToxDict_dis, aattDict_dis, nocoding_dis)):
				if verdict=='Fc':
					discard_dis[queryKey]='Contig Error: Contig starts/ends with predicted TA'
				elif verdict=='Fs':
					discard_dis[queryKey]='No Cluster found or '+ 'Gap >'+str(gappyness)+' or Strand Difference'
				elif verdict=='Fx':
					discard_dis[queryKey]='NotAcceptable'
# Queries that were never accepted as a TA pair but do sit inside a
# conserved (>2-gene) operon get a dedicated rejection reason.
operonLong_dis={}
for item in querySPSDict:
    if item not in ta_Accession:
        for element in operonOccuranceDict:
            if item in operonOccuranceDict[element]:
                operonLong_dis[item]='Long Conserved Operon'
def TA_status(query):
    """Classify one query: 'TA_like\t2G'/'TA_like\t3G' when it was accepted
    as a toxin-antitoxin pair, 'notTA_like\t<reasons>' when a filter
    discarded it, or None when neither applies."""
    if query not in ta_Accession:
        # Gather every recorded rejection reason for this query.
        reasonList = []
        for reason_dict in (startContig_dis, endContig_dis, noToxDict_dis,
                            aattDict_dis, nocoding_dis, operonLong_dis,
                            discard_dis):
            if query in reason_dict:
                reasonList.append(reason_dict[query])
        if not reasonList:
            return None
        if len(reasonList) == 1:
            return 'notTA_like' + '\t' + reasonList[0]
        # De-duplicate identical reasons before joining.
        return 'notTA_like' + '\t' + '; '.join(map(str, list(set(reasonList))))
    else:
        # Accepted query: a single recorded pair means a two-gene system,
        # two recorded pairs mean a three-gene system.
        for elements in filteredTApairPosFamily:
            if elements.split(':')[0] == query:
                npairs = len(filteredTApairPosFamily[elements])
                if npairs == 1:
                    partners = [entry[1] for entry in twoThree_List
                                if entry[0] == query]
                    if len(partners) == 1:
                        return 'TA_like' + '\t' + '2G'
                if npairs == 2:
                    return 'TA_like' + '\t' + '3G'
# Write the per-query TA status report; queries whose best call is a
# three-gene ('3G') system are collected so they can be excluded from the
# two-gene operon outputs below.
i=0
discard3Gset=set()
with open (args.out_prefix+'_TA_Status_report.txt', 'w') as sTAOut:
    print('#Serial', 'Query', 'Status', 'Reason', sep='\t', file=sTAOut)
    for item in querySPSDict:
        i+=1
        # NOTE(review): TA_status() is evaluated twice per query; it is
        # deterministic, so this is only a minor redundancy.
        print(i, querySPSDict[item], TA_status(item), sep='\t', file=sTAOut)
        if TA_status(item)=='TA_like'+'\t'+'3G':
            discard3Gset.add(querySPSDict[item])
#print(discard3Gset)
# Tabular summary of the accepted TA-like operons (3G systems excluded).
with open (args.out_prefix+'_operonTA.txt', 'w') as opTAOut:
    print('#Query_Species', 'QueryAccession', 'TA_System', 'AccessopnInPredictedTA-LikeRegion', 'IntergenicSpace', 'ClusterNumber_TA-likeSystem', 'Number_Occurred', 'Conserve(%)', 'Description',sep='\t', file=opTAOut)
    for item in operonTA_TXT:
        if item[0] not in discard3Gset:
            print('\t'.join(map(str,item)), file=opTAOut)
# Flank-gene blocks (one per qualifying query), separated by blank lines so
# the file can later be split on "\n\n\n\n".
with open (args.out_prefix+'_operonTA.tsv', 'w') as opTAtsv:
    for item in egsList:
        if item!='':
            #tsvAccession=item[0].split('\t')[0].split('#')[0]+'#'+item[0].split('\t')[0].split('#')[1].split('_')[0]
            #print(item[0].split('\t')[0])
            if item[0].split('\t')[0] in tsvAccessionSet:
                if item[0].split('\t')[0] not in discard3Gset:
                    print('\n'.join(item), file=opTAtsv)
                    print("\n\n", file=opTAtsv)
# Size the drawing surface from the most extreme flank-gene coordinates
# (scaled 4 px per 100 bp) and the number of queries, then build a
# scrollable Tk canvas used only as a postscript rendering target.
windowMost=round(((max(pPos)+abs(min(nPos))+1)*4)/100)
widthM=(windowMost*3)+500
heightM=int(newQ)*20
#aheightM=heightM*1.3
from tkinter import *
master = Tk()
canvas = Canvas(master, width=widthM,height=heightM,background='white', scrollregion=(0,0,round(widthM*2.5),round(heightM*2.5)))
hbar=Scrollbar(master,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=canvas.xview)
vbar=Scrollbar(master,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=canvas.yview)
#canvas.config(width=1500,height=1000)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
def operonFamily(item):
    """Return the cluster label drawn inside a gene arrow; sentinel
    families (0, center, noProt, noProtP, noColor) are left blank."""
    if item in (0, center, noProt, noProtP, noColor):
        return ' '
    return item
# Render each qualifying query's operon as strand-aware arrow polygons on
# the canvas (one row per query), export to postscript, then write the
# disqualified queries' flank blocks and rebuild a fresh canvas for them.
egTA=open(args.out_prefix+'_operonTA.tsv','r').read()
egTAs=egTA.split("\n\n\n\n")
line_pos_y=0
for eg in egTAs:
    if eg!='':
        coln=0
        entries=eg.splitlines()
        ndoms=len(entries)
        ptnstats=entries[0].split("\t")
        org=ptnstats[0].replace("_"," ")
        textspace=widthM/2
        line_pos_y=line_pos_y+16-round(postscriptSize(newQ))
        half_dom_height=5-round(postscriptSize(newQ))
        text = canvas.create_text(textspace/2,line_pos_y, text=org, fill="#404040", font=("Arial", "12"))
        for entry in entries:
            items=entry.split("\t")
            aln_start=round(int(items[5])*4/100)   # bp -> px (4 px per 100 bp)
            aln_end=round(int(items[6])*4/100)
            strandType=items[3]
            dom1_name=int(items[4])
            dom1_len=(aln_end-aln_start)
            oL80=round(dom1_len*80/100)   # 80% body, remaining 20% arrow head
            dom1_start=aln_start+textspace
            dom1_end=dom1_len+dom1_start
            if strandType=='+':
                rect = canvas.create_polygon(dom1_start, line_pos_y+half_dom_height, dom1_start, line_pos_y-half_dom_height,dom1_start+oL80, line_pos_y-half_dom_height, dom1_end, line_pos_y, dom1_start+oL80, line_pos_y+half_dom_height,fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            else:
                rect = canvas.create_polygon(dom1_end-oL80, line_pos_y+half_dom_height, dom1_start, line_pos_y, dom1_end-oL80, line_pos_y-half_dom_height,dom1_end, line_pos_y-half_dom_height, dom1_end, line_pos_y+half_dom_height, fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            textd1 = canvas.create_text(dom1_start+(dom1_len/2),line_pos_y, text=operonFamily(dom1_name), font=("Arial", "7"))
            coln=coln+1
retval2 = canvas.postscript(file=args.out_prefix+"_flankgenesTA.ps", height=heightM, width=widthM, colormode="color")
#disqualified
# Flank-gene blocks for queries that did NOT qualify as TA-like.
with open (args.out_prefix+'_operonTAdisqualified.tsv', 'w') as opTAtsvd:
    for item in egsList:
        if item!='':
            #tsvAccession=item[0].split('\t')[0].split('#')[0]+'#'+item[0].split('\t')[0].split('#')[1].split('_')[0]
            #print(item[0].split('\t')[0])
            if item[0].split('\t')[0] not in tsvAccessionSet:
                print('\n'.join(item), file=opTAtsvd)
                print("\n\n", file=opTAtsvd)
# Fresh canvas for the disqualified-queries figure.
windowMost=round(((max(pPos)+abs(min(nPos))+1)*4)/100)
widthM=(windowMost*3)+500
heightM=int(newQ)*20
#aheightM=heightM*1.3
from tkinter import *
master = Tk()
canvas = Canvas(master, width=widthM,height=heightM,background='white', scrollregion=(0,0,round(widthM*2.5),round(heightM*2.5)))
hbar=Scrollbar(master,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=canvas.xview)
vbar=Scrollbar(master,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=canvas.yview)
#canvas.config(width=1500,height=1000)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
def operonFamily(item):
    """Return the cluster label drawn inside a gene arrow; sentinel
    families (0, center, noProt, noProtP, noColor) are left blank."""
    if item in (0, center, noProt, noProtP, noColor):
        return ' '
    return item
# Render the disqualified queries the same way, export to postscript,
# then (optionally) build the phylogenetic tree with the ete3 pipeline.
egTAD=open(args.out_prefix+'_operonTAdisqualified.tsv','r').read()
egTADs=egTAD.split("\n\n\n\n")
line_pos_y=0
for eg in egTADs:
    if eg!='':
        coln=0
        entries=eg.splitlines()
        ndoms=len(entries)
        ptnstats=entries[0].split("\t")
        org=ptnstats[0].replace("_"," ")
        textspace=widthM/2
        line_pos_y=line_pos_y+16-round(postscriptSize(newQ))
        half_dom_height=5-round(postscriptSize(newQ))
        text = canvas.create_text(textspace/2,line_pos_y, text=org, fill="#404040", font=("Arial", "12"))
        for entry in entries:
            items=entry.split("\t")
            aln_start=round(int(items[5])*4/100)   # bp -> px (4 px per 100 bp)
            aln_end=round(int(items[6])*4/100)
            strandType=items[3]
            dom1_name=int(items[4])
            dom1_len=(aln_end-aln_start)
            oL80=round(dom1_len*80/100)   # 80% body, 20% arrow head
            dom1_start=aln_start+textspace
            dom1_end=dom1_len+dom1_start
            if strandType=='+':
                rect = canvas.create_polygon(dom1_start, line_pos_y+half_dom_height, dom1_start, line_pos_y-half_dom_height,dom1_start+oL80, line_pos_y-half_dom_height, dom1_end, line_pos_y, dom1_start+oL80, line_pos_y+half_dom_height,fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            else:
                rect = canvas.create_polygon(dom1_end-oL80, line_pos_y+half_dom_height, dom1_start, line_pos_y, dom1_end-oL80, line_pos_y-half_dom_height,dom1_end, line_pos_y-half_dom_height, dom1_end, line_pos_y+half_dom_height, fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            textd1 = canvas.create_text(dom1_start+(dom1_len/2),line_pos_y, text=operonFamily(dom1_name), font=("Arial", "7"))
            coln=coln+1
retval3 = canvas.postscript(file=args.out_prefix+"_flankgenesTAdisqualified.ps", height=heightM, width=widthM, colormode="color")
if args.tree:###Tree Command with ETE###
    tree_file= args.out_prefix+'_tree.fasta'
    # NOTE(review): the command is built by string interpolation and run via
    # os.system; a shell-free subprocess.run([...]) call would be safer if
    # out_prefix can ever contain shell metacharacters.
    if args.cpu:
        tree_command="ete3 build -a %s -o %s --nochecks --clearall -w mafft_default-trimal01-none-fasttree_full --rename-dup-seqnames --cpu %s" %(tree_file, tree_file[:-6], core)
    else:
        tree_command="ete3 build -a %s -o %s --nochecks --clearall -w mafft_default-trimal01-none-fasttree_full --rename-dup-seqnames" %(tree_file, tree_file[:-6])
    os.system(tree_command)
from ete3 import Tree, SeqMotifFace, TreeStyle, add_face_to_node
def normalize_strandView(item): #Strand view change
    """Map a strand sign to the arrow character used in the tree motifs:
    '+' becomes '>', anything else becomes '<'."""
    return '>' if item == '+' else '<'
def familyView(item): #Strand view change
    """Return the cluster label shown on a tree motif as a string;
    sentinel families (0, center, noProt, noProtP, noColor) are blank."""
    if item in (0, center, noProt, noProtP, noColor):
        return ' '
    return str(item)
# Build per-leaf motif tracks for the ETE tree: one fixed-width slot per
# flank position (15 chars: 13 'X' + 2 gap), with startDict mapping each
# flank offset (mins..maxs) to its start column in the dummy alignment.
seqMult=((maxs)*2)+1
seq = ("XXXXXXXXXXXXX--"*seqMult)
startDict={}
udList=[]
for ud in range (mins, maxs+1, 1):
    udList.append(ud)
sList=[]
for sa in range(1, 15*seqMult, 15):
    sList.append(sa)
for ln in range(len(udList)):
    startDict[udList[ln]]=sList[ln]
nwTree=''
motifDict={}
motifDict_2={}
if os.path.isfile(args.out_prefix+'_tree/mafft_default-trimal01-none-fasttree_full/'+args.out_prefix+'_tree.fasta.final_tree.nw') == True:
    with open(args.out_prefix+'_tree/mafft_default-trimal01-none-fasttree_full/'+args.out_prefix+'_tree.fasta.final_tree.nw', 'r') as treeIn:
        for line in treeIn:
            nwTree=line
            # Crude newick tokenizer: strip brackets/semicolon and split on
            # commas to recover the leaf labels.
            for items in line.replace('(','').replace(')', '').replace(';', '').replace(',','\t').split('\t'):
                item=items.split('|')[0]
                simple_motifs=[]      # gradient-filled variant (figure 1)
                simple_motifs_2=[]    # flat-colour variant (figure 2)
                for keys in sorted(startDict):
                    if keys in accFlankDict[item]:
                        simple_motifs_s = [startDict[keys], startDict[keys]+13, normalize_strandView(accFlankDict[item][keys][-1]), None, size, outliner(colorDict[familyDict[accFlankDict[item][keys][:-1].split('#')[0]]]), 'rgradient:'+colorDict[familyDict[accFlankDict[item][keys][:-1].split('#')[0]]], "arial|"+fsize+"|black|"+familyView(familyDict[accFlankDict[item][keys][:-1].split('#')[0]])]
                        simple_motifs.append(simple_motifs_s)
                        simple_motifs_2_s = [startDict[keys], startDict[keys]+13, normalize_strandView(accFlankDict[item][keys][-1]), None, size, outliner(colorDict[familyDict[accFlankDict[item][keys][:-1].split('#')[0]]]),colorDict[familyDict[accFlankDict[item][keys][:-1].split('#')[0]]], "arial|"+fsize+"|black|"]
                        simple_motifs_2.append(simple_motifs_2_s)
                    else:
                        # No gene at this flank position: draw an empty slot.
                        simple_motifs_s = [startDict[keys], startDict[keys]+13, '[]', None, size, '#eeeeee', 'rgradient:'+'#ffffff', "arial|"+fsize+"|black|"]
                        simple_motifs.append(simple_motifs_s)
                        simple_motifs_2_s = [startDict[keys], startDict[keys]+13, '[]', None, size, '#eeeeee', '#ffffff', "arial|"+fsize+"|black|"]
                        simple_motifs_2.append(simple_motifs_2_s)
                motifDict[items[:items.index(':')]]=simple_motifs
                motifDict_2[items[:items.index(':')]]=simple_motifs_2
else:
    print('> ETE3 failed to create tree due to lack of valid protein accesions, at least 2 required !')
    sys.exit()
def get_example_tree():
    """Load the final newick tree and decorate every leaf with its
    gradient-style flank-gene motif track (motifDict)."""
    t= Tree(nwTree)
    labels = nwTree.replace('(','').replace(')', '').replace(';', '').replace(',','\t').split('\t')
    for label in labels:
        leaf = label[:label.index(':')]
        face = SeqMotifFace(seq, motifs=motifDict[leaf], seq_format="-", gap_format="blank")
        (t & leaf).add_face(face, 0, "aligned")
    t.ladderize()
    return t
def get_example_tree_2():
    """Load the final newick tree and decorate every leaf with its
    flat-colour flank-gene motif track (motifDict_2)."""
    t= Tree(nwTree)
    labels = nwTree.replace('(','').replace(')', '').replace(';', '').replace(',','\t').split('\t')
    for label in labels:
        leaf = label[:label.index(':')]
        face = SeqMotifFace(seq, motifs=motifDict_2[leaf], seq_format="-", gap_format="blank")
        (t & leaf).add_face(face, 0, "aligned")
    t.ladderize()
    return t
# Render the two motif-annotated tree figures.  The __name__ guards are
# no-ops when the script is executed directly.
if __name__ == '__main__':
    t = get_example_tree()
    ts = TreeStyle()
    ts.tree_width = 300
    ts.show_branch_support = True
    if args.tree_order:
        # Persist the ladderized leaf order for the tree-order figure below.
        t.write(outfile=args.out_prefix+'_ladderTree.nw')
        t.render(args.out_prefix+"_flankgenes_1.svg",tree_style=ts)
    else:
        t.render(args.out_prefix+"_flankgenes_1.svg",tree_style=ts)
if __name__ == '__main__':
    t = get_example_tree_2()
    ts = TreeStyle()
    ts.tree_width = 300
    ts.show_branch_support = True
    if args.tree_order:
        t.write(outfile=args.out_prefix+'_ladderTree.nw')
        t.render(args.out_prefix+"_flankgenes_2.svg", tree_style=ts)
    else:
        t.render(args.out_prefix+"_flankgenes_2.svg", tree_style=ts)
if args.tree and args.tree_order: # Queries in postscript file will be presented as tree order
    # Leaf order of the ladderized tree drives the row order of the
    # TreeOrder operon table and figure.
    treeOrderList=[]
    with open(args.out_prefix+'_ladderTree.nw', 'r') as laddertreeIn:
        for line in laddertreeIn:
            for items in line.replace('(','').replace(')', '').replace(';', '').replace(',','\t').split('\t'):
                item=items.split('|')[0]
                treeOrderList.append(item)
    ntPos=[]
    ptPos=[]
    with open(args.out_prefix+'_TreeOrder_operon.tsv', 'w') as opOut:
        for queries in treeOrderList:
            for items in sorted(accFlankDict[queries]):
                if queryStrand[queries]=='+':
                    # Plus-strand query: coordinates kept as-is, offset by the
                    # first flank gene's start.
                    ids=accFlankDict[queries][items][:-1]
                    lengths=LengthDict[accFlankDict[queries][items][:-1]]
                    species=queries+'|'+remBadChar(speciesDict[queries])
                    qStrand=queryStrand[queries]
                    nStrand=accFlankDict[queries][items][-1]
                    family=familyDict[accFlankDict[queries][items][:-1].split('#')[0]]
                    startPos=int(positionDict[accFlankDict[queries][0][:-1]].split('\t')[0])-1
                    start=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[0])
                    end=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[1])
                    if queries in acc_CGF_Dict:
                        info=acc_CGF_Dict[queries]
                    else:
                        info='not_found'+'\t'+'not_found'+'\t'+'not_found'
                    print(species, lengths, qStrand, nStrand, family, start-startPos, end-startPos, start, end, ids, info, sep='\t', file=opOut)
                    nP=start-startPos
                    pP=end-startPos
                    ntPos.append(nP)
                    ptPos.append(pP)
                else:
                    # Minus-strand query: mirror the coordinates so the query
                    # always points the same way in the figure.
                    ids=accFlankDict[queries][items][:-1]
                    lengths=LengthDict[accFlankDict[queries][items][:-1]]
                    species=queries+'|'+remBadChar(speciesDict[queries])
                    qStrand=queryStrand[queries]
                    nStrand=accFlankDict[queries][items][-1]
                    family=familyDict[accFlankDict[queries][items][:-1].split('#')[0]]
                    startPos=int(positionDict[accFlankDict[queries][0][:-1]].split('\t')[1])+1
                    start=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[1])
                    end=int(positionDict[accFlankDict[queries][items][:-1]].split('\t')[0])
                    if queries in acc_CGF_Dict:
                        info=acc_CGF_Dict[queries]
                    else:
                        info='not_found'+'\t'+'not_found'+'\t'+'not_found'
                    print(species, lengths, qStrand, nStrand, family, startPos-start, startPos-end, end, start, ids, info, sep='\t', file=opOut)
                    nP=startPos-start
                    pP=startPos-end
                    ntPos.append(nP)
                    ptPos.append(pP)
            print('\n\n', file=opOut)   # per-query block separator
    # Scrollable canvas sized to the new coordinate extremes.
    windowMost=round(((max(ptPos)+abs(min(ntPos))+1)*4)/100)
    widthM=(windowMost*3)+500
    heightM=int(newQ)*20
    canvas = Canvas(master, width=widthM,height=heightM,background='white', scrollregion=(0,0,round(widthM*2.5),round(heightM*2.5)))
    hbar=Scrollbar(master,orient=HORIZONTAL)
    hbar.pack(side=BOTTOM,fill=X)
    hbar.config(command=canvas.xview)
    vbar=Scrollbar(master,orient=VERTICAL)
    vbar.pack(side=RIGHT,fill=Y)
    vbar.config(command=canvas.yview)
    canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
    canvas.pack(side=LEFT,expand=True,fill=BOTH)
def operonFamily(item):
    """Return the cluster label drawn inside a gene arrow; sentinel
    families (0, center, noProt, noProtP, noColor) are left blank."""
    if item in (0, center, noProt, noProtP, noColor):
        return ' '
    return item
# Draw the tree-ordered flank-gene figure, then re-parse the TSV blocks
# ("TAGs" section) into the lookup dictionaries used by the TA analysis.
eg1=open(args.out_prefix+'_TreeOrder_operon.tsv','r').read()
egs=eg1.split("\n\n\n\n")
line_pos_y=0
for eg in egs:
    if eg!='':
        coln=0
        entries=eg.splitlines()
        ndoms=len(entries)
        ptnstats=entries[0].split("\t")
        # Keep the accession part intact, prettify only the species part.
        org=ptnstats[0][:ptnstats[0].index('|')]+ptnstats[0][ptnstats[0].index('|'):].replace('_',' ')
        textspace=widthM/2
        line_pos_y=line_pos_y+16-round(postscriptSize(newQ))
        half_dom_height=5-round(postscriptSize(newQ))
        text = canvas.create_text(textspace/2-textspace/8,line_pos_y, text=org, fill="#404040", font=myFont12)
        for entry in entries:
            items=entry.split("\t")
            aln_start=round(int(items[5])*4/100)   # bp -> px (4 px per 100 bp)
            aln_end=round(int(items[6])*4/100)
            strandType=items[3]
            dom1_name=int(items[4])
            dom1_len=(aln_end-aln_start)
            oL80=round(dom1_len*80/100)   # 80% body, 20% arrow head
            dom1_start=aln_start+textspace
            dom1_end=dom1_len+dom1_start
            if strandType=='+':
                rect = canvas.create_polygon(dom1_start, line_pos_y+half_dom_height, dom1_start, line_pos_y-half_dom_height,dom1_start+oL80, line_pos_y-half_dom_height, dom1_end, line_pos_y, dom1_start+oL80, line_pos_y+half_dom_height,fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            else:
                rect = canvas.create_polygon(dom1_end-oL80, line_pos_y+half_dom_height, dom1_start, line_pos_y, dom1_end-oL80, line_pos_y-half_dom_height,dom1_end, line_pos_y-half_dom_height, dom1_end, line_pos_y+half_dom_height, fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            textd1 = canvas.create_text(dom1_start+(dom1_len/2),line_pos_y, text=operonFamily(dom1_name), font=myFont7)
            coln=coln+1
retval = canvas.postscript(file=args.out_prefix+"_treeOrder_flankgenes.ps", height=heightM, width=widthM, colormode="color")
#TAGs
# Parse every flank-gene block into per-gene records.  Keys look like
# 'queryACC#serial|total:count:queryPos'.
egsList=[]
queryNameList=set()
family_Query_set=set()
querySPDict={}
querySPSDict={}
fgInfoDict={}# WP_090521055.1#293|9:7:0 WP_090521058.1#+ 0 954:1322
for Line in egs:
    if Line!='':
        FlankSet=Line.split('\n')
        egsList.append(FlankSet)
        fgTotal=len(FlankSet)
        count=0
        for item in FlankSet:
            count+=1
            queryACC=item.split('\t')[0].split('#')[0]
            querySerial=item.split('\t')[9].split('#')[1]
            fgACC=item.split('\t')[9].split('#')[0]
            strandPN=str(item.split('\t')[3])
            fgFamily=int(item.split('\t')[4])
            fgStart=int(item.split('\t')[5])
            fgEnd=int(item.split('\t')[6])
            ##chayanChange
            if queryACC==fgACC:
                family_Query_set.add(fgFamily)
            # queryACCnum is the 1-based row of the query itself, 0 otherwise.
            queryACCnum=''
            if queryACC==fgACC:
                queryACCnum=str(count)
            else:
                queryACCnum=str(0)
            queryNameList.add(str(queryACC+'#'+str(querySerial)))
            fgInfoDict[str(queryACC+'#'+str(querySerial)+'|'+str(fgTotal)+':'+str(count)+':'+str(queryACCnum))]= fgACC+'#'+strandPN+'\t'+str(fgFamily)+'\t'+str(fgStart)+':'+str(fgEnd)
            querySPDict[str(queryACC+'#'+str(querySerial)+'|'+str(fgTotal)+':'+str(count)+':'+str(queryACCnum))]=item.split('\t')[0]
            querySPSDict[str(queryACC+'#'+str(querySerial))]=item.split('\t')[0]
            #print(queryACC+'#'+querySerial, queryACCnum, fgTotal, count, fgACC, fgFamily, fgStart, fgEnd, sep='\t')
#print(fgInfoDict)
# Regroup the per-gene records by query.
# NOTE(review): the loop variable set queryNameList is rebound to a plain
# list inside the loop; iteration continues over the original set object.
queryNameListDict={}
strandListDict={}
AccessionNameListDict={}
FamListDict={}
posListDict={}
for item in queryNameList:
    queryNameList=[]
    AccessionNameList=[]
    strandList=[]
    FamList=[]
    posList=[]
    for query in fgInfoDict:
        if item==query.split('|')[0]:
            queryNameList.append(query)
            strandList.append(fgInfoDict[query].split('\t')[0].split('#')[1])
            FamList.append(fgInfoDict[query].split('\t')[1])
            posList.append(fgInfoDict[query].split('\t')[2])
            AccessionNameList.append(fgInfoDict[query].split('\t')[0].split('#')[0])
    queryNameListDict[item]=queryNameList
    strandListDict[item]=strandList
    AccessionNameListDict[item]=AccessionNameList
    FamListDict[item]=FamList
    posListDict[item]=posList
    #print(item, queryNameList, AccessionNameList, strandList, FamList)
def continuousVerifyCheck(item):
    """If the numbers in *item* form a contiguous integer run (in any
    order), return them as a list of strings; otherwise return None."""
    tokens = ';'.join(map(str, item)).split(';')
    values = [int(tok) for tok in tokens]
    #print (sorted(values), list(range(min(values), max(values)+1)))
    if sorted(values) == list(range(min(values), max(values) + 1)):
        return tokens
# For every query, grow a candidate operon window around it: up to two
# neighbours up- and downstream that pass the intergenic-gap cutoff
# ('gappyness') and belong to a real cluster (family != 0).
# NOTE(review): indentation reconstructed from a whitespace-mangled source;
# the +/-1 and +/-2 neighbour checks are treated as siblings, with
# continuousVerifyCheck() rejecting any non-contiguous window — confirm
# against the canonical source.
sameStrandWindowDict={}
noToxDict_dis={}
startContig_dis={}
endContig_dis={}
for keys in queryNameListDict:
    query=keys.split('#')[0]
    mainPoint=AccessionNameListDict[keys].index(query)   # window index of the query itself
    LengthOfFlankFound=len(AccessionNameListDict[keys])
    if mainPoint!=0: #Not starting of Contig
        if mainPoint!=LengthOfFlankFound-1: #Not end of Contig
            numList=[] #Window size of FlankGenes
            for num in range(LengthOfFlankFound):
                numList.append(num)
            sameStrand=[]
            for x in range(LengthOfFlankFound):
                if strandListDict[keys][mainPoint]==strandListDict[keys][x]:
                    sameStrand.append(x)
            diffStrand=[]
            for y in range(LengthOfFlankFound):
                if strandListDict[keys][mainPoint]!=strandListDict[keys][y]:
                    diffStrand.append(y)
            #print(query, mainPoint, LengthOfFlankFound, sameStrand, diffStrand)
            sameStrandWindowD=[]   # downstream window members
            sameStrandWindowU=[]   # upstream window members
            if mainPoint in sameStrand:   # always true: a gene shares its own strand
                sameStrandWindowD.append(mainPoint)
                if mainPoint+1 in sameStrand or mainPoint+1 in diffStrand:   # neighbour exists
                    GapStart=int(posListDict[keys][mainPoint].split(':')[1])
                    GapEnd=int(posListDict[keys][mainPoint+1].split(':')[0])
                    difference=(GapEnd-GapStart)-1   # intergenic distance (bp)
                    if int(gappyness)>int(difference) or int(gappyness)==int(difference):
                        if int(FamListDict[keys][mainPoint+1])!=0:#and int(FamListDict[keys][mainPoint+1])!=int(FamListDict[keys][mainPoint]):
                            sameStrandWindowD.append(mainPoint+1)
                    #print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint], posListDict[keys][mainPoint+1], difference)
                if mainPoint+2 in sameStrand or mainPoint+2 in diffStrand:
                    GapStart2=int(posListDict[keys][mainPoint+1].split(':')[1])
                    GapEnd2=int(posListDict[keys][mainPoint+2].split(':')[0])
                    difference2=(GapEnd2-GapStart2)-1
                    if int(gappyness)>int(difference2) or int(gappyness)==int(difference2):
                        if int(FamListDict[keys][mainPoint+2])!=0:
                            sameStrandWindowD.append(mainPoint+2)
                    #print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint+1], posListDict[keys][mainPoint+2], difference2)
                if mainPoint-1 in sameStrand or mainPoint-1 in diffStrand:
                    GapStart4=int(posListDict[keys][mainPoint-1].split(':')[1])
                    GapEnd4=int(posListDict[keys][mainPoint].split(':')[0])
                    difference4=(GapEnd4-GapStart4)-1
                    if int(gappyness)>int(difference4) or int(gappyness)==int(difference4):
                        if int(FamListDict[keys][mainPoint-1])!=0 :#and int(FamListDict[keys][mainPoint-1])!=int(FamListDict[keys][mainPoint]):
                            sameStrandWindowU.append(mainPoint-1)
                    #print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint-1], posListDict[keys][mainPoint], difference4)
                if mainPoint-2 in sameStrand or mainPoint-2 in diffStrand:
                    GapStart5=int(posListDict[keys][mainPoint-2].split(':')[1])
                    GapEnd5=int(posListDict[keys][mainPoint-1].split(':')[0])
                    difference5=(GapEnd5-GapStart5)-1
                    if int(gappyness)>int(difference5) or int(gappyness)==int(difference5):
                        if int(FamListDict[keys][mainPoint-2])!=0:
                            sameStrandWindowU.append(mainPoint-2)
                    #print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys],posListDict[keys][mainPoint-2], posListDict[keys][mainPoint-1], difference5)
            # Keep only contiguous windows of more than one gene; the dict
            # key is 'queryACC#serial:<window index of the query>'.
            if continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)):
                if len(continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)))>1:
                    sameStrandWindowDict[keys+':'+str(continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)).index(str(mainPoint)))]=continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD))
                    #print(keys,mainPoint, LengthOfFlankFound, strandListDict[keys], sameStrand, continuousVerifyCheck(sorted(sameStrandWindowU+sameStrandWindowD)), FamListDict[keys])
            else:
                noToxDict_dis[keys]='No Cluster found or '+ 'Gap >'+str(gappyness)+' or Strand Difference'
        else:
            endContig_dis[keys]='Contig Error: Query is in the end of Contig'
    else:
        startContig_dis[keys]='Contig Error: Query is in the start of contig'
# Map each accepted window to the cluster/family id of every member gene.
OperonFamilyDict={}
for keys in sameStrandWindowDict:
    opFamList=[]
    for item in sameStrandWindowDict[keys]:
        # keys[:-2] strips the ':<queryIndex>' suffix back to the query key.
        opFamList.append(str(FamListDict[keys[:-2]][int(item)]))
    OperonFamilyDict[keys]=opFamList
# Collect every family that contains a query itself; keys[-1:] is the
# query's position within its own window.
queryFamilySet=set()
for keys in OperonFamilyDict:
    queryFamilySet.add(int(OperonFamilyDict[keys][int(keys[-1:])]))
    #print(keys, OperonFamilyDict[keys])
#print(queryFamilySet, 'queryFamilySet')
# Enumerate adjacent gene pairs inside each window that include the query
# gene; pairs of two query-family genes (toxin-toxin) and pairs touching a
# pseudo/non-coding cluster (family id above the largest query family) are
# discarded with a reason.
aattDict_dis={}
nocoding_dis={}
probableTApairFamDict={}
probableTApairFamPosDict={}
for keys in OperonFamilyDict:
    taPairList=[]
    taPairPosList=[]
    for i in range (len(OperonFamilyDict[keys])-1):
        for j in range (1,len(OperonFamilyDict[keys])):
            if j==i+1:   # adjacent positions only
                if i==int(keys[-1]) or j==int(keys[-1]):   # pair must include the query
                    if int(OperonFamilyDict[keys][i]) in queryFamilySet and int(OperonFamilyDict[keys][j]) in queryFamilySet:
                        aattDict_dis[keys.split(':')[0]]='Pair Error: Paired with homolog cluster'
                        pass #if condition like 'Toxin(518)-Toxin(518)' we discard
                    else:
                        if int(OperonFamilyDict[keys][i]) in queryFamilySet or int(OperonFamilyDict[keys][j]) in queryFamilySet: #if one of them in queryFamily
                            if int(OperonFamilyDict[keys][i])<=max(queryFamilySet) and int(OperonFamilyDict[keys][j])<=max(queryFamilySet): #not pseudo/noncoding rna
                                probableTA=str(OperonFamilyDict[keys][i])+'.'+str(OperonFamilyDict[keys][j])
                                probableTAPos=str(i)+'.'+str(j)
                                taPairList.append(probableTA)
                                taPairPosList.append(probableTAPos)
                            else:
                                nocoding_dis[keys.split(':')[0]]='Pair Error: Non coding RNA'
                #else:
                    #print(keys)
    if len(taPairList)>0:
        probableTApairFamDict[keys]=taPairList
        probableTApairFamPosDict[keys]=taPairPosList
    #else:
        #print(keys, OperonFamilyDict[keys], taPairList)
# Enumerate three-gene runs ("operons") inside each window that contain the
# query; two-gene windows cannot form one and get the generic discard
# reason.  NOTE(review): indentation reconstructed — the else is attached
# to the len()>2 test; confirm against the canonical source.
probableOperonFamDict={}
probableOperonFamSet=set()
probableOperonList=[]
for keys in OperonFamilyDict:
    operonList=[]
    #rev1
    if len(OperonFamilyDict[keys])>2:
        for i in range (len(OperonFamilyDict[keys])-2):
            for j in range (1,len(OperonFamilyDict[keys])-1):
                for k in range (2,len(OperonFamilyDict[keys])):
                    if j==i+1 and k==j+1:   # consecutive triple only
                        #we need to get operon that contains query pair
                        if int(OperonFamilyDict[keys][i]) in queryFamilySet or int(OperonFamilyDict[keys][j]) in queryFamilySet \
                        or int(OperonFamilyDict[keys][k]) in queryFamilySet :
                            probableOperon=str(OperonFamilyDict[keys][i])+'.'+str(OperonFamilyDict[keys][j])+'.'+\
                            str(OperonFamilyDict[keys][k])
                            operonList.append(probableOperon)
                            probableOperonList.append(probableOperon)
                            probableOperonFamSet.add(probableOperon)
        if len(operonList)>0:
            probableOperonFamDict[keys]=operonList
    else:
        noToxDict_dis[keys.split(':')[0]]='No Cluster found or '+ 'Gap >'+str(gappyness)+' or Strand Difference'
def pseudoCheck(item):
    """Return 'pseudoFound' when a dotted operon string (e.g. '2.518.9')
    contains a cluster id larger than the biggest real (coding) query
    family in queryFamilySet, i.e. the operon touches a pseudogene or
    non-coding cluster; otherwise return None.

    Fix vs. original: max() was taken over the *string* fragments, a
    lexicographic comparison ('9' > '518'), so the wrong fragment could be
    chosen before the int() conversion.  Compare numerically, matching the
    int() comparisons used for the same cutoff elsewhere in this script.
    """
    largest_family = max(int(fam) for fam in item.split('.'))
    if largest_family > max(queryFamilySet):
        return 'pseudoFound'
# Keep only pseudogene-free operons conserved across more than one query.
# NOTE(review): trueOperonOccuranceDict is initialised but never filled in
# this section.
operonOccuranceDict={} #operons that are conserved more than one time
trueOperonOccuranceDict={}
for item in probableOperonFamSet:
    if pseudoCheck(item)!='pseudoFound': #remove pseudogenes/noncoding containing operons
        keyset=set()
        for keys in probableOperonFamDict:
            if item in probableOperonFamDict[keys]:
                keyset.add(keys.split(':')[0])
        if len(keyset)>1: #operons that are conserved more than one time
            operonOccuranceDict[item]=keyset
    #else:
    # print(item,pseudoCheck(item))
# Drop TA pair candidates that are really part of a conserved 3-gene
# operon (the pair matches the first or last two families of a conserved
# operon in which this query occurs); keep the rest.
filteredTApairPosFamily={}
filteredTApairFamily={}
filteredTApairSet=set()
for items in probableTApairFamDict:
    Fpset=set()#rev1
    for item in probableTApairFamDict[items]:
        for operons in operonOccuranceDict:
            taItem=item.split('.')
            operonItem1=operons.split('.')[0:2]
            operonItem2=operons.split('.')[1:3]
            #rev1 operonItem3=operons.split('.')[2:4]
            if taItem==operonItem1 or taItem==operonItem2: #rev1 or taItem==operonItem3:
                if items.split(':')[0] in operonOccuranceDict[operons]:
                    Fpset.add(items)
                    #print(items, probableTApairFamPosDict[items], probableTApairFamDict[items], operons, probableOperonList.count(operons), 'FP')
    if items not in Fpset:
        #print(items, probableTApairFamPosDict[items], probableTApairFamDict[items], operons, probableOperonList.count(operons), 'TP')
        filteredTApairFamily[items]=probableTApairFamDict[items] #'WP_057732687.1#38:2': ['2.518', '518.9'],
        filteredTApairPosFamily[items]=probableTApairFamPosDict[items] #'WP_015310665.1#174:1': ['0.1', '1.2']
        for elements in probableTApairFamDict[items]:
            filteredTApairSet.add(elements)
def getTA_ACCpair(pair, accessionSerial):
    """Resolve a family pair (e.g. '2.518') for one query occurrence into
    a tab-separated record 'acc1|acc2 <TAB> intergenic_gap <TAB> fam1|fam2'.
    Returns None when the occurrence was filtered out or the pair is not
    recorded for it."""
    if accessionSerial not in filteredTApairFamily:
        return None
    if accessionSerial not in probableTApairFamDict:
        return None
    positions = probableTApairFamPosDict[accessionSerial]
    for position, famPair in zip(positions, probableTApairFamDict[accessionSerial]):
        if famPair != pair:
            continue
        fam1, fam2 = pair.split('.')
        posA, posB = position.split('.')
        #print('new',famPair, position)#new 12.1658 0.1
        # Translate window-relative positions to flank-list indices.
        windowIdx1 = int(sameStrandWindowDict[accessionSerial][int(posA)])
        windowIdx2 = int(sameStrandWindowDict[accessionSerial][int(posB)])
        serial = accessionSerial.split(':')[0]
        accession1 = AccessionNameListDict[serial][windowIdx1]
        accession2 = AccessionNameListDict[serial][windowIdx2]
        #print(windowIdx1,windowIdx2,accession1, accession2)
        gap_start = posListDict[serial][windowIdx1].split(':')[1]
        gap_end = posListDict[serial][windowIdx2].split(':')[0]
        difference = (int(gap_end) - int(gap_start)) - 1
        return accession1+'|'+accession2+'\t'+str(difference)+'\t'+fam1+'|'+fam2
def descriptionFromTA(item):
    """Look up the stored description of each accession in an
    'acc1|acc2<TAB>...' pair record and join them with '|'; accessions
    missing from desDict are skipped."""
    descriptions = []
    for accession in item.split('\t')[0].split('|'):
        if accession in desDict:
            descriptions.append(desDict[accession])
    return '|'.join(map(str, descriptions))
# For every surviving family pair, record which query occurrences carry it
# and in how many distinct queries it is conserved.
trueTApairCount={}
trueTApairAccession={}
for items in filteredTApairSet:
    accessionSet=set()    # full occurrence keys ('acc#serial:idx')
    accessionSet2=set()   # distinct queries ('acc#serial')
    for keys in filteredTApairFamily:
        if items in filteredTApairFamily[keys]:
            accessionSet.add(keys)
            accessionSet2.add(keys.split(':')[0])
    trueTApairAccession[items]=accessionSet
    trueTApairCount[items]=len(accessionSet2)
def contigCheck(accs, accPair, strandDict, accsInSPList):
    """Classify a predicted TA pair relative to the query's flank list.

    Returns 'P'  - partner on the same strand, not at a contig edge,
            'Fc' - partner at the first/last position of the flank list,
            'Fs' - partner on the opposite strand,
            'Fx' - the query accession is not part of the pair string.
    """
    if accs not in accPair:
        return 'Fx'
    partners = accPair.split('|')
    partners.remove(accs)
    partner_idx = accsInSPList.index(partners[0])
    if strandDict[accsInSPList.index(accs)] != strandDict[partner_idx]:
        return 'Fs'
    # Interior positions are 1 .. len-2; the edges indicate a contig break.
    if 0 < partner_idx < len(accsInSPList) - 1:
        return 'P'
    return 'Fc'
# Accumulators for the tree-order TA classification pass below.
discard_dis={}        # occurrence-level discard reasons
twoThree_List=[]      # [query_serial, 'acc1|acc2'] for accepted pairs
tsvAccessionSet=set() # species|accession strings of accepted queries
ta_Accession=set()    # query serials accepted as TA-like
operonTA_TXT=[]       # rows for the TreeOrderOperonTA report
#with open (args.out_prefix+'_TreeOrderOperonTA.txt', 'w') as opTAOut:
# print('#Query_Species', 'QueryAccession', 'TA_System', 'AccessopnInPredictedTA-LikeRegion', 'IntergenicSpace', 'ClusterNumber_TA-likeSystem', 'Number_Occurred', 'Conserve(%)', 'Description',sep='\t', file=opTAOut)
# Classify every conserved TA pair per query occurrence.  contigCheck()
# yields 'P' (acceptable), 'Fc' (partner at a contig edge), 'Fs' (strand
# mismatch) or 'Fx' (partner not in the flank list).
# Fixes vs. original:
#  * The "already discarded?" test was written as
#    `x not in startContig_dis and endContig_dis and ...`, which only
#    tested membership in the first dict and then the *truthiness* of the
#    remaining dicts; it now tests membership in every reason dict.
#  * getTA_ACCpair()/contigCheck() are deterministic, so the repeated
#    identical calls are hoisted into locals.
for item in sorted(trueTApairAccession):
    #print (item, (trueTApairCount[item]), len(trueTApairCount[item]))
    for accs in trueTApairAccession[item]:
        #if accs=='WP_056969168.1#334:2':
        queryAcc = accs.split('#')[0]
        querySerial = accs.split(':')[0]
        pairInfo = getTA_ACCpair(item, accs)      # 'acc1|acc2\tgap\tfam1|fam2'
        pairAccs = pairInfo.split('\t')[0]
        status = contigCheck(queryAcc, pairAccs, strandListDict[accs[:-2]], AccessionNameListDict[accs[:-2]])
        if queryAcc not in pairAccs:
            continue   # every branch requires the pair to contain the query
        if status == 'P':
            # Accepted: record the pair and the report row.
            pairQuery = querySerial+'\t'+pairAccs
            twoThree_List.append(pairQuery.split('\t'))
            tsvAccessionSet.add(querySPSDict[querySerial])
            ta_Accession.add(querySerial)
            operonTAInfo = str(querySPSDict[querySerial])+'\t'+str(queryAcc)+'\t'+str(len(item.split('.')))+'\t'+str(pairInfo)+'\t'+str(trueTApairCount[item])+'/'+str(len(querySPSDict))+'\t'+str(round(int(trueTApairCount[item])*100/len(querySPSDict),2))
            operonTA_TXT.append(operonTAInfo.split('\t'))
        else:
            # Only record a failure reason when no earlier filter already
            # discarded this query.
            alreadyDiscarded = any(querySerial in d for d in
                                   (startContig_dis, endContig_dis, noToxDict_dis,
                                    aattDict_dis, nocoding_dis))
            if not alreadyDiscarded:
                if status == 'Fc':
                    discard_dis[querySerial]='Contig Error: Contig starts/ends with predicted TA'
                elif status == 'Fs':
                    discard_dis[querySerial]='No Cluster found or '+ 'Gap >'+str(gappyness)+' or Strand Difference'
                elif status == 'Fx':
                    discard_dis[querySerial]='NotAcceptable'
# Queries that were never assigned to a TA pair but still appear in one of the
# conserved operon clusters are discarded as "long conserved operon" hits.
operonLong_dis={}
for item in querySPSDict:
    if item not in ta_Accession:
        for element in operonOccuranceDict:
            if item in operonOccuranceDict[element]:
                operonLong_dis[item]='Long Conserved Operon'
def TA_status(query):
    """Classify a query accession as TA-like or not.

    Returns a tab-separated string:
      * 'notTA_like' + reason(s) when the query appears in any of the
        discard dictionaries (reasons are de-duplicated and ';'-joined);
      * 'TA_like' + '2G' for a single-family (two-gene) system, or
        'TA_like' + '3G' for a two-family (three-gene) system.

    NOTE(review): implicitly returns None when no branch matches — callers
    print the result directly, so such queries show as 'None'.  Relies on
    module-level dicts (ta_Accession, *_dis, filteredTApairPosFamily,
    twoThree_List) built earlier in the script.
    """
    if query not in ta_Accession:
        # Collect every recorded reason this query was discarded.
        reasonList=[]
        if query in startContig_dis:
            reasonList.append(startContig_dis[query])
        if query in endContig_dis:
            reasonList.append(endContig_dis[query])
        if query in noToxDict_dis:
            reasonList.append(noToxDict_dis[query])
        if query in aattDict_dis :
            reasonList.append(aattDict_dis[query])
        if query in nocoding_dis :
            reasonList.append(nocoding_dis[query])
        if query in operonLong_dis:
            reasonList.append(operonLong_dis[query])
        if query in discard_dis:
            reasonList.append(discard_dis[query])
        if len(reasonList)>0:
            if len(reasonList)==1:
                return 'notTA_like'+'\t'+reasonList[0]
            else:
                # Several reasons: de-duplicate before joining.
                return 'notTA_like'+'\t'+'; '.join(map(str,list(set(reasonList))))
    else:
        for elements in filteredTApairPosFamily:
            # One family recorded for this query -> candidate two-gene system.
            if elements.split(':')[0]==query and len(filteredTApairPosFamily[elements])==1:
                glist=[]
                for items in twoThree_List:
                    if query==items[0]:
                        glist.append(items[1])
                if len(glist)==1:
                    return 'TA_like'+'\t'+'2G'
            # Two families recorded -> three-gene system.
            if elements.split(':')[0]==query and len(filteredTApairPosFamily[elements])==2:
                return 'TA_like'+'\t'+'3G'
# Write one status line per query to the report; queries classified as
# three-gene ('3G') systems are also collected so the operon output below
# can exclude them.
i=0
discard3Gset=set()
with open (args.out_prefix+'_TA_Status_report.txt', 'w') as sTAOut:
    print('#Serial', 'Query', 'Status', 'Reason', sep='\t', file=sTAOut)
    for item in querySPSDict:
        i+=1
        # NOTE(review): TA_status(item) is evaluated twice per query here;
        # caching the result in a local would halve the classification work.
        print(i, querySPSDict[item], TA_status(item), sep='\t', file=sTAOut)
        if TA_status(item)=='TA_like'+'\t'+'3G':
            discard3Gset.add(querySPSDict[item])
#print(discard3Gset)
# Human-readable operon table, excluding queries flagged as 3G systems.
with open (args.out_prefix+'_TreeOrderOperonTA.txt', 'w') as opTAOut:
    print('#Query_Species', 'QueryAccession', 'TA_System', 'AccessopnInPredictedTA-LikeRegion', 'IntergenicSpace', 'ClusterNumber_TA-likeSystem', 'Number_Occurred', 'Conserve(%)', 'Description',sep='\t', file=opTAOut)
    for item in operonTA_TXT:
        if item[0] not in discard3Gset:
            print('\t'.join(map(str,item)), file=opTAOut)
# TSV used for drawing below: blocks of gene lines separated by blank lines,
# keeping only entries whose accession passed TA filtering and is not 3G.
with open (args.out_prefix+'_TreeOrderOperonTA.tsv', 'w') as opTAtsv:
    for item in egsList:
        if item!='':
            #tsvAccession=item[0].split('\t')[0].split('#')[0]+'#'+item[0].split('\t')[0].split('#')[1].split('_')[0]
            #print(item[0].split('\t')[0])
            if item[0].split('\t')[0] in tsvAccessionSet:
                if item[0].split('\t')[0] not in discard3Gset:
                    print('\n'.join(item), file=opTAtsv)
                    print("\n\n", file=opTAtsv)
# Canvas geometry: genomic coordinates are scaled at 4 px per 100 units;
# height grows with the number of query rows (newQ).
windowMost=round(((max(ptPos)+abs(min(ntPos))+1)*4)/100)
widthM=(windowMost*3)+500
heightM=int(newQ)*20
#aheightM=heightM*1.3
from tkinter import *
master = Tk()
# Scrollable white canvas with a scroll region 2.5x the visible area.
canvas = Canvas(master, width=widthM,height=heightM,background='white', scrollregion=(0,0,round(widthM*2.5),round(heightM*2.5)))
hbar=Scrollbar(master,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=canvas.xview)
vbar=Scrollbar(master,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=canvas.yview)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
def operonFamily(item):
    """Label shown inside a gene arrow: the family id itself, or a single
    blank for the sentinel values that must stay unlabeled."""
    if item==0:
        return ' '
    if item==center:
        return ' '
    if item==noProt:
        return ' '
    if item==noProtP:
        return ' '
    if item==noColor:
        return ' '
    return item
# Render one row per qualified query: species label on the left, gene arrows
# on the right.  Arrow orientation encodes strand; fill colors come from
# colorDict and labels from operonFamily().
egTA=open(args.out_prefix+'_TreeOrderOperonTA.tsv','r').read()
egTAs=egTA.split("\n\n\n\n")
line_pos_y=0
for eg in egTAs:
    if eg!='':
        coln=0
        entries=eg.splitlines()
        ndoms=len(entries)
        ptnstats=entries[0].split("\t")
        org=ptnstats[0].replace("_"," ")  # species name: underscores -> spaces
        textspace=widthM/2
        # Row spacing and arrow half-height shrink as the query count grows.
        line_pos_y=line_pos_y+16-round(postscriptSize(newQ))
        half_dom_height=5-round(postscriptSize(newQ))
        text = canvas.create_text(textspace/2,line_pos_y, text=org, fill="#404040", font=("Arial", "12"))
        for entry in entries:
            items=entry.split("\t")
            # Columns 5/6 hold start/end coordinates; same 4 px per 100 scale.
            aln_start=round(int(items[5])*4/100)
            aln_end=round(int(items[6])*4/100)
            strandType=items[3]
            dom1_name=int(items[4])
            dom1_len=(aln_end-aln_start)
            oL80=round(dom1_len*80/100)  # arrow body is 80%; remaining 20% forms the head
            dom1_start=aln_start+textspace
            dom1_end=dom1_len+dom1_start
            if strandType=='+':
                rect = canvas.create_polygon(dom1_start, line_pos_y+half_dom_height, dom1_start, line_pos_y-half_dom_height,dom1_start+oL80, line_pos_y-half_dom_height, dom1_end, line_pos_y, dom1_start+oL80, line_pos_y+half_dom_height,fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            else:
                rect = canvas.create_polygon(dom1_end-oL80, line_pos_y+half_dom_height, dom1_start, line_pos_y, dom1_end-oL80, line_pos_y-half_dom_height,dom1_end, line_pos_y-half_dom_height, dom1_end, line_pos_y+half_dom_height, fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            textd1 = canvas.create_text(dom1_start+(dom1_len/2),line_pos_y, text=operonFamily(dom1_name), font=("Arial", "7"))
            coln=coln+1
# Export the finished drawing as PostScript.
retval2 = canvas.postscript(file=args.out_prefix+"_treeOrder_flankgenesTA.ps", height=heightM, width=widthM, colormode="color")
#disqualified
# Entries whose accession did NOT pass TA filtering go to a separate TSV,
# drawn later as the "disqualified" figure.
with open (args.out_prefix+'_TreeOrderOperonTAdisqualified.tsv', 'w') as opTAtsvd:
    for item in egsList:
        if item!='':
            #tsvAccession=item[0].split('\t')[0].split('#')[0]+'#'+item[0].split('\t')[0].split('#')[1].split('_')[0]
            #print(item[0].split('\t')[0])
            if item[0].split('\t')[0] not in tsvAccessionSet:
                print('\n'.join(item), file=opTAtsvd)
                print("\n\n", file=opTAtsvd)
# Second canvas for the disqualified figure; same geometry and scroll setup
# as the qualified one above.
windowMost=round(((max(ptPos)+abs(min(ntPos))+1)*4)/100)
widthM=(windowMost*3)+500
heightM=int(newQ)*20
#aheightM=heightM*1.3
from tkinter import *
master = Tk()
canvas = Canvas(master, width=widthM,height=heightM,background='white', scrollregion=(0,0,round(widthM*2.5),round(heightM*2.5)))
hbar=Scrollbar(master,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=canvas.xview)
vbar=Scrollbar(master,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=canvas.yview)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
def operonFamily(item):
    # Map sentinel ids (0, center, noProt, noProtP, noColor) to a blank
    # label; any real family id is returned unchanged.
    blank = ' '
    if item==0:
        label = blank
    elif item==center:
        label = blank
    elif item==noProt:
        label = blank
    elif item==noProtP:
        label = blank
    elif item==noColor:
        label = blank
    else:
        label = item
    return label
# Same row-drawing pass as the qualified figure, but reading the
# disqualified TSV and exporting to a separate PostScript file.
egTAD=open(args.out_prefix+'_TreeOrderOperonTAdisqualified.tsv','r').read()
egTADs=egTAD.split("\n\n\n\n")
line_pos_y=0
for eg in egTADs:
    if eg!='':
        coln=0
        entries=eg.splitlines()
        ndoms=len(entries)
        ptnstats=entries[0].split("\t")
        org=ptnstats[0].replace("_"," ")  # species name: underscores -> spaces
        textspace=widthM/2
        line_pos_y=line_pos_y+16-round(postscriptSize(newQ))
        half_dom_height=5-round(postscriptSize(newQ))
        text = canvas.create_text(textspace/2,line_pos_y, text=org, fill="#404040", font=("Arial", "12"))
        for entry in entries:
            items=entry.split("\t")
            aln_start=round(int(items[5])*4/100)
            aln_end=round(int(items[6])*4/100)
            strandType=items[3]
            dom1_name=int(items[4])
            dom1_len=(aln_end-aln_start)
            oL80=round(dom1_len*80/100)  # arrow body 80%, head 20%
            dom1_start=aln_start+textspace
            dom1_end=dom1_len+dom1_start
            if strandType=='+':
                rect = canvas.create_polygon(dom1_start, line_pos_y+half_dom_height, dom1_start, line_pos_y-half_dom_height,dom1_start+oL80, line_pos_y-half_dom_height, dom1_end, line_pos_y, dom1_start+oL80, line_pos_y+half_dom_height,fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            else:
                rect = canvas.create_polygon(dom1_end-oL80, line_pos_y+half_dom_height, dom1_start, line_pos_y, dom1_end-oL80, line_pos_y-half_dom_height,dom1_end, line_pos_y-half_dom_height, dom1_end, line_pos_y+half_dom_height, fill=colorDict[dom1_name], outline=outliner(colorDict[dom1_name]))
            textd1 = canvas.create_text(dom1_start+(dom1_len/2),line_pos_y, text=operonFamily(dom1_name), font=("Arial", "7"))
            coln=coln+1
retval3 = canvas.postscript(file=args.out_prefix+"_treeOrder_flankgenesTAdisqualified.ps", height=heightM, width=widthM, colormode="color")
#disqualified
import shutil
def remove_folder(path):
    """Recursively delete the directory tree at *path*.

    :param path: directory to remove.
    :raises FileNotFoundError: if *path* does not exist.
    """
    if os.path.exists(path):
        shutil.rmtree(path)
    else:
        # BUG FIX: the original raised ``XXError``, an undefined name that
        # itself produced a NameError at runtime; raise the appropriate
        # builtin exception instead.  The only call site in this script is
        # guarded by os.path.exists(), so no caller behavior changes.
        raise FileNotFoundError("Files not found")
# Remove per-cluster intermediate files, print the citation notice and exit.
directory = args.out_prefix+'_flankgene.fasta'+'_cluster_out_individuals'
if os.path.exists(directory):
    remove_folder(directory)
print('\n'+'<<< Done >>>')
print('\nIf you use TAGs in your work, please remember to cite these papers!'+'\n\n- Saha CK, Pires RS, Brolin H, Delannoy M, Atkinson GC. 2020. FlaGs and webFlaGs: discovering novel biology through the analysis of gene neighbourhood conservation. Bioinformatics.'+\
'\nhttps://doi.org/10.1093/bioinformatics/btaa788')
sys.exit()
| 39.670533
| 379
| 0.677698
| 17,116
| 126,549
| 4.919198
| 0.073323
| 0.012328
| 0.008551
| 0.006841
| 0.774279
| 0.748008
| 0.727948
| 0.712282
| 0.695274
| 0.68525
| 0
| 0.030495
| 0.155497
| 126,549
| 3,189
| 380
| 39.682973
| 0.757343
| 0.11601
| 0
| 0.700802
| 0
| 0.006924
| 0.11741
| 0.011571
| 0.004373
| 0
| 0
| 0
| 0
| 1
| 0.021501
| false
| 0.00328
| 0.010569
| 0.000364
| 0.088557
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
31471ce1c12c18064307381d689bd7c604c7e3ce
| 149
|
py
|
Python
|
angr/procedures/libc/setbuf.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 6,132
|
2015-08-06T23:24:47.000Z
|
2022-03-31T21:49:34.000Z
|
angr/procedures/libc/setbuf.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 2,272
|
2015-08-10T08:40:07.000Z
|
2022-03-31T23:46:44.000Z
|
angr/procedures/libc/setbuf.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 1,155
|
2015-08-06T23:37:39.000Z
|
2022-03-31T05:54:11.000Z
|
import angr
class setbuf(angr.SimProcedure):
    """No-op model of libc ``setbuf``: the stream's buffering mode has no
    effect on the simulated program state, so the call simply returns."""

    # pylint:disable=arguments-differ, unused-argument
    def run(self, stream, buf):
        # setbuf returns void.
        return None
| 16.555556
| 53
| 0.697987
| 18
| 149
| 5.777778
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201342
| 149
| 8
| 54
| 18.625
| 0.87395
| 0.322148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
31486c72d7dc2979eab1de3e230296c92ab947dd
| 170
|
wsgi
|
Python
|
Monitora-backend/main.wsgi
|
miiila/nasi-politici
|
3edbf7adc3d89e2839c50ff2e7693101784868a0
|
[
"MIT"
] | 16
|
2019-11-26T16:30:39.000Z
|
2021-07-25T19:13:15.000Z
|
Monitora-backend/main.wsgi
|
miiila/nasi-politici
|
3edbf7adc3d89e2839c50ff2e7693101784868a0
|
[
"MIT"
] | 33
|
2019-11-25T08:17:54.000Z
|
2022-02-26T21:08:41.000Z
|
Monitora-backend/main.wsgi
|
miiila/nasi-politici
|
3edbf7adc3d89e2839c50ff2e7693101784868a0
|
[
"MIT"
] | 8
|
2020-01-06T10:39:52.000Z
|
2021-10-16T15:06:08.000Z
|
#!/usr/bin/python
# mod_wsgi entry point: exposes the Flask app under the module-level name
# ``application``, which the WSGI server looks for.
import sys
import logging
# Route log output to stderr so it ends up in the web server's error log.
logging.basicConfig(stream=sys.stderr)
# Make the application package importable inside the WSGI process.
sys.path.insert(0,"/var/www/fullreport/API/main/")
from main import app as application
| 24.285714
| 50
| 0.788235
| 27
| 170
| 4.962963
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.076471
| 170
| 7
| 51
| 24.285714
| 0.847134
| 0.094118
| 0
| 0
| 0
| 0
| 0.188312
| 0.188312
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
314ba3a5881c9855c3210c8a581e1ee7bed8a510
| 168
|
py
|
Python
|
tests/test_utils/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | null | null | null |
# Shared helpers for the test suite; names are re-exported via star-imports.
import pytest
import backend as F
# Decorator that parameterizes a test over the supported index dtypes.
parametrize_idtype = pytest.mark.parametrize("idtype", [F.int32, F.int64])
from .checks import *
from .graph_cases import get_cases
| 21
| 74
| 0.779762
| 25
| 168
| 5.12
| 0.6
| 0.265625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027211
| 0.125
| 168
| 7
| 75
| 24
| 0.843537
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
314cd3af076d89a6ef53dd505680bb47e99707ba
| 2,881
|
py
|
Python
|
tests/test_influx.py
|
letrout/home_automation
|
290a710ebecbb799746b4eaa1377865f833009bb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_influx.py
|
letrout/home_automation
|
290a710ebecbb799746b4eaa1377865f833009bb
|
[
"Apache-2.0"
] | 1
|
2022-01-04T19:24:40.000Z
|
2022-01-29T19:24:02.000Z
|
tests/test_influx.py
|
letrout/home_automation
|
290a710ebecbb799746b4eaa1377865f833009bb
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from sensors.lib.influx import influx
def test_lp_1f1v():
    # One field, one tag: "measurement,tags fields timestamp".
    lp = influx.influx_lp("temp", {"field1": 1}, {"tag1": 2}, 1634158455045502066)
    assert lp == "temp,tag1=2 field1=1 1634158455045502066"
def test_lp_2f2v():
    # Multiple fields/tags are comma-joined within their sections.
    lp = influx.influx_lp(
        "temp", {"field1": 1, "field2": 5.5}, {"tag1": 2, "tag2": 6}, 1634158455045502066
    )
    assert lp == "temp,tag1=2,tag2=6 field1=1,field2=5.5 1634158455045502066"
def test_lp_emptytag():
    # No tags: the measurement stands alone before the fields section.
    lp = influx.influx_lp("temp", {"field1": 1, "field2": 5.5}, {}, 1634158455045502066)
    assert lp == "temp field1=1,field2=5.5 1634158455045502066"
def test_lp_emptryfield():
    # Fields are mandatory in line protocol; an empty dict yields None.
    lp = influx.influx_lp("temp", {}, {"tag1": 2, "tag2": 6}, 1634158455045502066)
    assert lp is None
# FIXME: don't know how to handle spaces in strings
#def test_lp_string_space():
# test = influx.influx_lp(
# "temp",
# {"field1": 1, "field2": "a string"},
# {"tag1": 2, "tag2": 6},
# 1634158455045502066
# )
# assert test == """temp,tag1=2,tag2=6 field1=1,field2="a string" 1634158455045502066"""
def test_lp_string():
    # String field values (no spaces) are emitted verbatim.
    lp = influx.influx_lp(
        "temp", {"field1": 1, "field2": "string"}, {"tag1": 2, "tag2": 6}, 1634158455045502066
    )
    assert lp == "temp,tag1=2,tag2=6 field1=1,field2=string 1634158455045502066"
def test_lp_true():
    # A lowercase "true" tag value passes through unchanged.
    lp = influx.influx_lp(
        "temp", {"field1": 1, "field2": "string"}, {"tag1": 2, "tag2": "true"}, 1634158455045502066
    )
    assert lp == 'temp,tag1=2,tag2=true field1=1,field2=string 1634158455045502066'
def test_lp_false():
    # A capitalized "False" tag value keeps its original casing.
    lp = influx.influx_lp(
        "temp", {"field1": 1, "field2": "string"}, {"tag1": 2, "tag2": "False"}, 1634158455045502066
    )
    assert lp == 'temp,tag1=2,tag2=False field1=1,field2=string 1634158455045502066'
def test_lp_falsestring():
    # Mixed-case boolean-ish strings are treated as plain strings.
    lp = influx.influx_lp(
        "temp", {"field1": 1, "field2": "string"}, {"tag1": 2, "tag2": "FAlse"}, 1634158455045502066
    )
    assert lp == 'temp,tag1=2,tag2=FAlse field1=1,field2=string 1634158455045502066'
def test_lp_bad_ts():
    # A non-numeric timestamp is rejected.
    lp = influx.influx_lp("temp", {"field1": 1}, {"tag1": 2}, "manynanoseconds")
    assert lp is None
def test_lp_bad_field():
    # Fields must be a mapping; a list is rejected.
    lp = influx.influx_lp("temp", ["field1", 1], {"tag1": 2}, 1634158455045502066)
    assert lp is None
def test_lp_bad_tag():
    # Tags must be a mapping; a bare string is rejected.
    lp = influx.influx_lp("temp", {"field1": 1}, "tag1", 1634158455045502066)
    assert lp is None
| 23.048
| 91
| 0.552933
| 323
| 2,881
| 4.80805
| 0.145511
| 0.085641
| 0.117193
| 0.139086
| 0.841597
| 0.777849
| 0.772054
| 0.705087
| 0.579524
| 0.510625
| 0
| 0.23657
| 0.295731
| 2,881
| 124
| 92
| 23.233871
| 0.528832
| 0.111073
| 0
| 0.588889
| 0
| 0
| 0.25637
| 0.059976
| 0
| 0
| 0
| 0.008065
| 0.122222
| 1
| 0.122222
| false
| 0
| 0.022222
| 0
| 0.144444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9edb5b37e160f7a0e9572c7c5ee19a5cbb3758c9
| 192
|
py
|
Python
|
infiltrate/views/faq.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | 4
|
2019-04-08T09:30:10.000Z
|
2020-09-15T19:25:30.000Z
|
infiltrate/views/faq.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | 19
|
2019-04-09T19:02:14.000Z
|
2020-12-25T05:22:45.000Z
|
infiltrate/views/faq.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | null | null | null |
"""This is where the routes are defined."""
import flask
from flask_classful import FlaskView
class FaqView(FlaskView):
    """Route handler serving the FAQ page."""

    def index(self):
        # GET /faq/ -> rendered static template.
        page = flask.render_template("faq.html")
        return page
| 21.333333
| 48
| 0.729167
| 26
| 192
| 5.307692
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 192
| 8
| 49
| 24
| 0.867925
| 0.192708
| 0
| 0
| 0
| 0
| 0.053691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
7305f64bed74a8cd99a86a6e7a9ee70628c723d7
| 2,688
|
py
|
Python
|
apps/people/tests/test_create_people.py
|
bergran/people
|
a2639b238005bd37b7a08f220b57c4b5ad5c031d
|
[
"MIT"
] | null | null | null |
apps/people/tests/test_create_people.py
|
bergran/people
|
a2639b238005bd37b7a08f220b57c4b5ad5c031d
|
[
"MIT"
] | null | null | null |
apps/people/tests/test_create_people.py
|
bergran/people
|
a2639b238005bd37b7a08f220b57c4b5ad5c031d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from fastapi.encoders import jsonable_encoder
from starlette import status
from apps.people.serializers.people import PeopleOutSerializer
from core.test.transaction_test_case import TransactionTestCase
class CreatePeopleTestCase(TransactionTestCase):
    """Integration tests for POST /api/v1/people/: creation, duplicate
    detection and the one-living-king-per-place rule."""

    @staticmethod
    def get_url():
        return '/api/v1/people/'

    def check_fields(self, people, response):
        # Every field sent must come back unchanged in the response body.
        body = response.json()
        for field, expected in people.items():
            self.assertEqual(expected, body.get(field))

    def test_create_kink_successfully(self):
        person = {
            'first_name': 'Kirigaya',
            'last_name': 'Kazuto',
            'place_id': 1,
            'is_king': True,
        }
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.check_fields(person, response)

    def test_create_duplicated_people(self):
        # Posting the identical person twice must fail the second time.
        person = {
            'first_name': 'Kirigaya',
            'last_name': 'Kazuto',
            'place_id': 1,
            'is_king': False,
        }
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.check_fields(person, response)
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_create_duplicated_king(self):
        # A second living king for the same place is rejected.
        person = {
            'first_name': 'Kirigaya',
            'last_name': 'Kazuto',
            'place_id': 1,
            'is_king': True,
        }
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.check_fields(person, response)
        person['first_name'] = 'Asuna'
        person['last_name'] = 'Yuuki'
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_create_duplicated_king_non_alive(self):
        # A second king is allowed when the new one is not alive.
        person = {
            'first_name': 'Kirigaya',
            'last_name': 'Kazuto',
            'place_id': 1,
            'is_king': True,
        }
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.check_fields(person, response)
        person['first_name'] = 'Asuna'
        person['last_name'] = 'Yuuki'
        person['is_alive'] = False
        response = self.client.post(self.get_url(), json=person)
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
| 33.185185
| 75
| 0.629092
| 306
| 2,688
| 5.284314
| 0.228758
| 0.029685
| 0.077922
| 0.095238
| 0.717996
| 0.717996
| 0.717996
| 0.717996
| 0.717996
| 0.717996
| 0
| 0.013466
| 0.254092
| 2,688
| 80
| 76
| 33.6
| 0.793017
| 0.007813
| 0
| 0.650794
| 0
| 0
| 0.102439
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 1
| 0.095238
| false
| 0
| 0.063492
| 0.015873
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b44906c6a4f9e4f33b182018ea4cd4e8c43e4617
| 18,876
|
py
|
Python
|
GPyOpt/optimization/acquisition_optimizer.py
|
RaulAstudillo/bocf
|
cd84eab2d1b4ea5a4bdeeb452df92296afbafb87
|
[
"BSD-3-Clause"
] | null | null | null |
GPyOpt/optimization/acquisition_optimizer.py
|
RaulAstudillo/bocf
|
cd84eab2d1b4ea5a4bdeeb452df92296afbafb87
|
[
"BSD-3-Clause"
] | null | null | null |
GPyOpt/optimization/acquisition_optimizer.py
|
RaulAstudillo/bocf
|
cd84eab2d1b4ea5a4bdeeb452df92296afbafb87
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .optimizer import OptLbfgs, OptSGD, OptDirect, OptCma, apply_optimizer, choose_optimizer, apply_optimizer_inner
from .anchor_points_generator import ObjectiveAnchorPointsGenerator, ThompsonSamplingAnchorPointsGenerator
from ..core.task.space import Design_space
from GPyOpt.experiment_design import initial_design
import multiprocessing
from pathos.multiprocessing import ProcessingPool as Pool
import numpy as np
import time
# Tags selecting the anchor-point strategy and the initial-design names
# used by the optimizer methods below.
# NOTE(review): the "thompsom" spelling is in the original tag; it is only
# ever compared against itself, so it is harmless but worth confirming
# before any rename.
max_objective_anchor_points_logic = "max_objective"
thompson_sampling_anchor_points_logic = "thompsom_sampling"
sobol_design_type = "sobol"
random_design_type = "random"
latin_design_type = "latin"
class AcquisitionOptimizer(object):
"""
General class for acquisition optimizers defined in domains with mix of discrete, continuous, bandit variables
:param space: design space class from GPyOpt.
:param optimizer: optimizer to use. Can be selected among:
- 'lbfgs': L-BFGS.
- 'DIRECT': Dividing Rectangles.
- 'CMA': covariance matrix adaptation.
"""
def __init__(self, space, optimizer='lbfgs', inner_optimizer='lbfgs2', n_starting=400, n_anchor=16, **kwargs):
    """
    :param space: design space class from GPyOpt.
    :param optimizer: name of the acquisition optimizer (e.g. 'lbfgs').
    :param inner_optimizer: name of the optimizer for inner problems.
    :param n_starting: number of initial-design points scored when
        selecting anchor points.
    :param n_anchor: number of anchor points kept for local optimization.
    :param kwargs: optional 'model' and 'type_anchor_points_logic'.
    """
    self.space = space
    self.optimizer_name = optimizer
    self.inner_optimizer_name = inner_optimizer
    self.n_starting = n_starting
    self.n_anchor = n_anchor
    self.kwargs = kwargs
    ## -- save extra options than can be passed to the optimizer
    if 'model' in self.kwargs:
        self.model = self.kwargs['model']
    # BUG FIX: the original tested for the key 'anchor_points_logic' but
    # then read 'type_anchor_points_logic', which raised KeyError whenever
    # only the former was supplied.  Test the key that is actually read.
    if 'type_anchor_points_logic' in self.kwargs:
        self.type_anchor_points_logic = self.kwargs['type_anchor_points_logic']
    else:
        self.type_anchor_points_logic = max_objective_anchor_points_logic
    ## -- Context handler: takes
    self.context_manager = ContextManager(space)
    ## -- Set optimizer and inner optimizer (WARNING: this won't update context)
    self.optimizer = choose_optimizer(self.optimizer_name, self.context_manager.noncontext_bounds)
    self.inner_optimizer = choose_optimizer(self.inner_optimizer_name, self.context_manager.noncontext_bounds)
def optimize2(self, f=None, df=None, f_df=None, duplicate_manager=None):
    """
    Optimizes the input function.

    :param f: function to optimize.
    :param df: gradient of the function to optimize.
    :param f_df: returns both the function to optimize and its gradient.

    NOTE(review): runs BOTH a parallel (Pool of 4) and a sequential local
    optimization over the same anchor points, apparently for comparison
    (debug prints included), but only the parallel results feed the
    returned minimum.
    """
    self.f = f
    self.df = df
    self.f_df = f_df
    ## --- Update the optimizer, in case context has been passed.
    self.optimizer = choose_optimizer(self.optimizer_name, self.context_manager.noncontext_bounds)
    ## --- Selecting the anchor points and removing duplicates
    if self.type_anchor_points_logic == max_objective_anchor_points_logic:
        anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, latin_design_type, f)
    elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
        anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)
    ## -- Select the anchor points (with context)
    anchor_points = anchor_points_generator.get(duplicate_manager=duplicate_manager, context_manager=self.context_manager)
    print('anchor_points ready')
    print(anchor_points)
    pool = Pool(4)
    optimized_points = pool.map(self._parallel_optimization_wrapper, anchor_points)
    print('parallel')
    print(optimized_points)
    optimized_points2 = [apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points]
    print('sequential')
    print(optimized_points2)
    # Best (x, f(x)) pair from the PARALLEL run only.
    x_min, fx_min = min(optimized_points, key=lambda t:t[1])
    return x_min, fx_min
def optimize(self, f=None, df=None, f_df=None, duplicate_manager=None, x_baseline=None):
    """
    Optimizes the input function.

    :param f: function to optimize.
    :param df: gradient of the function to optimize.
    :param f_df: returns both the function to optimize and its gradient.
    :param x_baseline: optional candidate point; if f(x_baseline) beats the
        best optimized point, the baseline is returned instead.
    """
    self.f = f
    self.df = df
    self.f_df = f_df
    ## --- Update the optimizer, in case context has been passed.
    self.optimizer = choose_optimizer(self.optimizer_name, self.context_manager.noncontext_bounds)
    ## --- Selecting the anchor points and removing duplicates
    if self.type_anchor_points_logic == max_objective_anchor_points_logic:
        anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, random_design_type, f, self.n_starting)
    elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
        anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)
    ## -- Select the anchor points (with context)
    anchor_points, anchor_points_values = anchor_points_generator.get(num_anchor=self.n_anchor, duplicate_manager=duplicate_manager, context_manager=self.context_manager, get_scores=True)
    print('anchor points')
    print(anchor_points)
    print(anchor_points_values)
    # Hard-wired debug switch between parallel (Pool of 4) and sequential
    # local optimization of the anchor points.
    parallel = True
    if parallel:
        pool = Pool(4)
        optimized_points = pool.map(self._parallel_optimization_wrapper, anchor_points)
        print('optimized points')
        print(optimized_points)
    else:
        #pass
        optimized_points = [apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points]
    x_min, fx_min = min(optimized_points, key=lambda t:t[1])
    # Fall back to the supplied baseline point if it is better.
    if x_baseline is not None:
        f_baseline = f(x_baseline)
        if f_baseline < fx_min:
            print('baseline was best found')
            print(f_baseline)
            x_min = x_baseline
            fx_min = f_baseline
    #if np.asscalar(anchor_points_values[0]) < np.asscalar(fx_min):
    #print('anchor_point was best found')
    #fx_min = np.atleast_2d(anchor_points_values[0])
    #x_min = np.atleast_2d(anchor_points[0])
    return x_min, fx_min
def optimize_comparison(self, f=None, df=None, f_df=None, duplicate_manager=None):
    """
    Optimizes the input function with the configured optimizer, then repeats
    the local optimization with 'sgd' over the same anchor points for
    comparison (debug prints included).  Returns the (x_min, fx_min) of the
    SGD pass.

    :param f: function to optimize.
    :param df: gradient of the function to optimize.
    :param f_df: returns both the function to optimize and its gradient.
    """
    self.f = f
    self.df = df
    self.f_df = f_df
    ## --- Update the optimizer, in case context has been passed.
    self.optimizer = choose_optimizer(self.optimizer_name, self.context_manager.noncontext_bounds)
    ## --- Selecting the anchor points and removing duplicates
    if self.type_anchor_points_logic == max_objective_anchor_points_logic:
        anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, random_design_type, f, self.n_starting)
    elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
        anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)
    ## -- Select the anchor points (with context)
    anchor_points, anchor_points_values = anchor_points_generator.get(num_anchor=self.n_anchor,
                                                                      duplicate_manager=duplicate_manager,
                                                                      context_manager=self.context_manager,
                                                                      get_scores=True)
    print('anchor points')
    print(anchor_points)
    print(anchor_points_values)
    parallel = True
    if parallel:
        pool = Pool(4)
        optimized_points = pool.map(self._parallel_optimization_wrapper, anchor_points)
        print('optimized points')
        print(optimized_points)
    else:
        # pass
        optimized_points = [
            apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager,
                            context_manager=self.context_manager, space=self.space) for a in anchor_points]
    x_min, fx_min = min(optimized_points, key=lambda t: t[1])
    # BUG FIX: np.asscalar() was deprecated and removed in NumPy >= 1.23;
    # ndarray.item() (via np.asarray for safety) is the documented
    # equivalent and returns the same Python scalar.
    if np.asarray(anchor_points_values[0]).item() < np.asarray(fx_min).item():
        print('anchor_point was best found')
        fx_min = np.atleast_2d(anchor_points_values[0])
        x_min = np.atleast_2d(anchor_points[0])
    # Comparison pass: redo the local optimization with SGD.
    print('sgd results')
    ## --- Update the optimizer, in case context has been passed.
    self.optimizer = choose_optimizer('sgd', self.context_manager.noncontext_bounds)
    parallel = True
    if parallel:
        pool = Pool(4)
        optimized_points = pool.map(self._parallel_optimization_wrapper, anchor_points)
        print('optimized points')
        print(optimized_points)
    else:
        optimized_points = [
            apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager,
                            context_manager=self.context_manager, space=self.space) for a in anchor_points]
    x_min, fx_min = min(optimized_points, key=lambda t: t[1])
    if np.asarray(anchor_points_values[0]).item() < np.asarray(fx_min).item():
        print('anchor_point was best found')
        fx_min = np.atleast_2d(anchor_points_values[0])
        x_min = np.atleast_2d(anchor_points[0])
    return x_min, fx_min
def optimize1(self, f=None, df=None, f_df=None, duplicate_manager=None):
    """
    Optimizes the input function (sequential-only variant: anchor points
    are refined one by one with apply_optimizer, no process pool).

    :param f: function to optimize.
    :param df: gradient of the function to optimize.
    :param f_df: returns both the function to optimize and its gradient.
    """
    self.f = f
    self.df = df
    self.f_df = f_df
    ## --- Update the optimizer, in case context has been passed.
    self.optimizer = choose_optimizer(self.optimizer_name, self.context_manager.noncontext_bounds)
    ## --- Selecting the anchor points and removing duplicates
    if self.type_anchor_points_logic == max_objective_anchor_points_logic:
        anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, random_design_type, f)
    elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
        anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)
    ## -- Select the anchor points (with context)
    anchor_points, anchor_points_values = anchor_points_generator.get(duplicate_manager=duplicate_manager, context_manager=self.context_manager)
    ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context)
    optimized_points = [apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points]
    x_min, fx_min = min(optimized_points, key=lambda t:t[1])
    #x_min, fx_min = min([apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points], key=lambda t:t[1])
    return x_min, fx_min
def optimize_inner_func(self, f=None, df=None, f_df=None, duplicate_manager=None, n_starting=64, n_anchor=8):
    """
    Optimizes the input function for an inner (nested) problem, using the
    dedicated inner optimizer, a Latin initial design and sequential local
    refinement via apply_optimizer_inner.

    :param f: function to optimize.
    :param df: gradient of the function to optimize.
    :param f_df: returns both the function to optimize and its gradient.
    :param n_starting: number of initial-design points scored.
    :param n_anchor: number of anchor points kept for local refinement.
    """
    self.f = f
    self.df = df
    self.f_df = f_df
    ## --- Update the optimizer, in case context has been passed.
    self.inner_optimizer = choose_optimizer(self.inner_optimizer_name, self.context_manager.noncontext_bounds)
    ## --- Selecting the anchor points and removing duplicates
    if self.type_anchor_points_logic == max_objective_anchor_points_logic:
        anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, latin_design_type, f, n_starting)
    elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
        anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)
    ## -- Select the anchor points (with context)
    anchor_points, anchor_points_values = anchor_points_generator.get(num_anchor=n_anchor, duplicate_manager=duplicate_manager, context_manager=self.context_manager, get_scores=True)
    #print(anchor_points)
    ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context)
    optimized_points = [apply_optimizer_inner(self.inner_optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points]
    #print('inner optimized points')
    #print(optimized_points)
    x_min, fx_min = min(optimized_points, key=lambda t:t[1])
    #x_min = np.atleast_2d(anchor_points[0])
    #fx_min = np.atleast_2d(anchor_points_values[0])
    return x_min, fx_min
def optimize_inner_func2(self, f=None, df=None, f_df=None, duplicate_manager=None, n_starting=64, n_anchor=32):
    """
    Optimizes the input function (variant of ``optimize_inner_func`` with more anchor points).

    :param f: function to optimize.
    :param df: gradient of the function to optimize.
    :param f_df: returns both the function to optimize and its gradient.
    :param duplicate_manager: optional manager used to avoid re-evaluating duplicate points.
    :param n_starting: number of initial design points scored when selecting anchor points.
    :param n_anchor: number of anchor points from which a local optimization is started.
    :return: tuple (x_min, fx_min) with the best point found and its objective value.
    """
    self.f = f
    self.df = df
    self.f_df = f_df

    ## --- Update the optimizer, in case context has been passed.
    self.inner_optimizer = choose_optimizer(self.inner_optimizer_name, self.context_manager.noncontext_bounds)

    ## --- Selecting the anchor points and removing duplicates
    if self.type_anchor_points_logic == max_objective_anchor_points_logic:
        anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, latin_design_type, f, n_starting)
    elif self.type_anchor_points_logic == thompson_sampling_anchor_points_logic:
        anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, sobol_design_type, self.model)
    else:
        # Fail fast with a clear message instead of hitting a NameError below.
        raise ValueError('Unknown anchor points logic: {}'.format(self.type_anchor_points_logic))

    ## -- Select the anchor points (with context)
    anchor_points, anchor_points_values = anchor_points_generator.get(num_anchor=n_anchor,
                                                                      duplicate_manager=duplicate_manager,
                                                                      context_manager=self.context_manager,
                                                                      get_scores=True)

    ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context)
    optimized_points = [
        apply_optimizer_inner(self.inner_optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager,
                              context_manager=self.context_manager, space=self.space) for a in anchor_points]

    # NOTE: leftover debugging (prints, a 1-second sleep, and a throwaway
    # comparison against the first two anchor points) was removed; it did
    # not affect the returned result but slowed every call by ~1 second.
    x_min, fx_min = min(optimized_points, key=lambda t: t[1])
    return x_min, fx_min
def _parallel_optimization_wrapper(self, x0):
    """Run the local optimizer from the starting point ``x0``.

    Helper intended for parallel map-style invocation; uses the objective
    and gradient callables previously stored on ``self``.
    """
    return apply_optimizer(self.optimizer, x0, f=self.f, df=None, f_df=self.f_df)
class ContextManager(object):
    """
    Handles the context variables (dimensions held fixed at a given value) in the optimizer.

    :param space: design space class from GPyOpt.
    :param context: dictionary mapping variable names to their context values.
    """

    def __init__(self, space, context=None):
        self.space = space
        self.all_index = list(range(space.model_dimensionality))
        self.all_index_obj = list(range(len(self.space.config_space_expanded)))
        self.context_index = []
        self.context_value = []
        self.context_index_obj = []
        # Copies (not aliases), so mutating these lists later cannot corrupt
        # the ``all_index*`` originals. The original code aliased
        # ``nocontext_index_obj`` to ``all_index_obj``.
        self.nocontext_index_obj = self.all_index_obj[:]
        self.noncontext_bounds = self.space.get_bounds()[:]
        self.noncontext_index = self.all_index[:]

        if context is not None:
            ## --- Update new context
            for context_variable in context.keys():
                variable = self.space.find_variable(context_variable)
                self.context_index += variable.index_in_model
                self.context_index_obj += variable.index_in_objective
                self.context_value += variable.objective_to_model(context[context_variable])

            ## --- Get bounds and index for non-context dimensions
            self.noncontext_index = [idx for idx in self.all_index if idx not in self.context_index]
            self.noncontext_bounds = [self.noncontext_bounds[idx] for idx in self.noncontext_index]

            ## --- Update non-context index in objective
            self.nocontext_index_obj = [idx for idx in self.all_index_obj if idx not in self.context_index_obj]

    def _expand_vector(self, x):
        '''
        Takes a value x in the subspace of not-fixed dimensions and expands it with the values of the fixed ones.

        :param x: input vector to be expanded by adding the context values
        :return: 2d array whose context columns are filled with their fixed values.
        '''
        x = np.atleast_2d(x)
        x_expanded = np.zeros((x.shape[0], self.space.model_dimensionality))
        # Scatter the free dimensions and the fixed context values into place.
        x_expanded[:, np.array(self.noncontext_index).astype(int)] = x
        x_expanded[:, np.array(self.context_index).astype(int)] = self.context_value
        return x_expanded
| 48.524422
| 247
| 0.66264
| 2,334
| 18,876
| 5.095973
| 0.100686
| 0.11098
| 0.044308
| 0.026484
| 0.751724
| 0.7302
| 0.72835
| 0.720111
| 0.711031
| 0.711031
| 0
| 0.005209
| 0.257523
| 18,876
| 388
| 248
| 48.649485
| 0.843453
| 0.228915
| 0
| 0.574879
| 0
| 0
| 0.022909
| 0.001702
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048309
| false
| 0
| 0.038647
| 0.004831
| 0.135266
| 0.120773
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b45cd4c02512e87dfb95b5b451287e391f50f7ec
| 232
|
py
|
Python
|
assignment-3/solvers/__init__.py
|
ybhan/Artificial-Intelligence-Projects
|
f562f4e4bf0093da13b3fb4675c97ea8e02b0ed1
|
[
"MIT"
] | null | null | null |
assignment-3/solvers/__init__.py
|
ybhan/Artificial-Intelligence-Projects
|
f562f4e4bf0093da13b3fb4675c97ea8e02b0ed1
|
[
"MIT"
] | null | null | null |
assignment-3/solvers/__init__.py
|
ybhan/Artificial-Intelligence-Projects
|
f562f4e4bf0093da13b3fb4675c97ea8e02b0ed1
|
[
"MIT"
] | null | null | null |
# COMP3620/6320 Artificial Intelligence
# The Australian National University - 2018
# Miquel Ramirez, Nathan Robinson, Enrico Scala ({enrico.scala,miquel.ramirez}@gmail.com)
from .solver_base import SolvingException, SolverWrapper
| 38.666667
| 89
| 0.814655
| 27
| 232
| 6.962963
| 0.851852
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 0.107759
| 232
| 5
| 90
| 46.4
| 0.850242
| 0.719828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b465e6a16c59d0d420f146e4aadc4dc946f251fd
| 92
|
py
|
Python
|
yzrpc/templates/project_template/src/services/__init__.py
|
ml444/yz-rpc
|
f3b6cb76dab72e1763d759080854c11aa6ade872
|
[
"Apache-2.0"
] | 5
|
2021-04-28T09:12:04.000Z
|
2021-11-25T13:50:32.000Z
|
yzrpc/templates/project_template/src/services/__init__.py
|
ml444/yz-rpc
|
f3b6cb76dab72e1763d759080854c11aa6ade872
|
[
"Apache-2.0"
] | null | null | null |
yzrpc/templates/project_template/src/services/__init__.py
|
ml444/yz-rpc
|
f3b6cb76dab72e1763d759080854c11aa6ade872
|
[
"Apache-2.0"
] | 2
|
2021-07-27T04:11:51.000Z
|
2022-01-06T09:36:06.000Z
|
#!/usr/bin/python3.7+
# -*- coding:utf-8 -*-
"""
@auth: cml
@date: 2021/2/23
@desc: ...
"""
| 11.5
| 22
| 0.5
| 14
| 92
| 3.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 0.141304
| 92
| 7
| 23
| 13.142857
| 0.455696
| 0.869565
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b470b4adffc8648c0343955ff3985b73ab00d688
| 248
|
py
|
Python
|
corehq/ex-submodules/pillowtop/exceptions.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/ex-submodules/pillowtop/exceptions.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/ex-submodules/pillowtop/exceptions.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
class PillowtopCheckpointReset(Exception):
    """Exception type signalling a pillowtop checkpoint reset."""
    pass


class PillowNotFoundError(Exception):
    """Exception type for a pillow that could not be found."""
    pass


class PillowtopIndexingError(Exception):
    """Exception type for errors raised while indexing documents."""
    pass


class PillowConfigError(Exception):
    """Exception type for an invalid pillow configuration."""
    pass


class BulkDocException(Exception):
    """Exception type for failures during bulk document processing."""
    pass
| 11.809524
| 42
| 0.758065
| 20
| 248
| 9.4
| 0.4
| 0.345745
| 0.382979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181452
| 248
| 20
| 43
| 12.4
| 0.926108
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b47c042002ea8bb549e4e68879ab958cee8fe2ee
| 10,317
|
py
|
Python
|
venv/Lib/site-packages/PyQt4/examples/declarative/modelviews/objectlistmodel/objectlistmodel_rc.py
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4/examples/declarative/modelviews/objectlistmodel/objectlistmodel_rc.py
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4/examples/declarative/modelviews/objectlistmodel/objectlistmodel_rc.py
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sat 2. Mar 10:35:47 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x09\x05\
\x2f\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x0a\x2a\x2a\x0a\
\x2a\x2a\x20\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x20\x28\x43\x29\
\x20\x32\x30\x31\x30\x20\x4e\x6f\x6b\x69\x61\x20\x43\x6f\x72\x70\
\x6f\x72\x61\x74\x69\x6f\x6e\x20\x61\x6e\x64\x2f\x6f\x72\x20\x69\
\x74\x73\x20\x73\x75\x62\x73\x69\x64\x69\x61\x72\x79\x28\x2d\x69\
\x65\x73\x29\x2e\x0a\x2a\x2a\x20\x41\x6c\x6c\x20\x72\x69\x67\x68\
\x74\x73\x20\x72\x65\x73\x65\x72\x76\x65\x64\x2e\x0a\x2a\x2a\x20\
\x43\x6f\x6e\x74\x61\x63\x74\x3a\x20\x4e\x6f\x6b\x69\x61\x20\x43\
\x6f\x72\x70\x6f\x72\x61\x74\x69\x6f\x6e\x20\x28\x71\x74\x2d\x69\
\x6e\x66\x6f\x40\x6e\x6f\x6b\x69\x61\x2e\x63\x6f\x6d\x29\x0a\x2a\
\x2a\x0a\x2a\x2a\x20\x54\x68\x69\x73\x20\x66\x69\x6c\x65\x20\x69\
\x73\x20\x70\x61\x72\x74\x20\x6f\x66\x20\x74\x68\x65\x20\x51\x74\
\x44\x65\x63\x6c\x61\x72\x61\x74\x69\x76\x65\x20\x6d\x6f\x64\x75\
\x6c\x65\x20\x6f\x66\x20\x74\x68\x65\x20\x51\x74\x20\x54\x6f\x6f\
\x6c\x6b\x69\x74\x2e\x0a\x2a\x2a\x0a\x2a\x2a\x20\x24\x51\x54\x5f\
\x42\x45\x47\x49\x4e\x5f\x4c\x49\x43\x45\x4e\x53\x45\x3a\x42\x53\
\x44\x24\x0a\x2a\x2a\x20\x59\x6f\x75\x20\x6d\x61\x79\x20\x75\x73\
\x65\x20\x74\x68\x69\x73\x20\x66\x69\x6c\x65\x20\x75\x6e\x64\x65\
\x72\x20\x74\x68\x65\x20\x74\x65\x72\x6d\x73\x20\x6f\x66\x20\x74\
\x68\x65\x20\x42\x53\x44\x20\x6c\x69\x63\x65\x6e\x73\x65\x20\x61\
\x73\x20\x66\x6f\x6c\x6c\x6f\x77\x73\x3a\x0a\x2a\x2a\x0a\x2a\x2a\
\x20\x22\x52\x65\x64\x69\x73\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\
\x20\x61\x6e\x64\x20\x75\x73\x65\x20\x69\x6e\x20\x73\x6f\x75\x72\
\x63\x65\x20\x61\x6e\x64\x20\x62\x69\x6e\x61\x72\x79\x20\x66\x6f\
\x72\x6d\x73\x2c\x20\x77\x69\x74\x68\x20\x6f\x72\x20\x77\x69\x74\
\x68\x6f\x75\x74\x0a\x2a\x2a\x20\x6d\x6f\x64\x69\x66\x69\x63\x61\
\x74\x69\x6f\x6e\x2c\x20\x61\x72\x65\x20\x70\x65\x72\x6d\x69\x74\
\x74\x65\x64\x20\x70\x72\x6f\x76\x69\x64\x65\x64\x20\x74\x68\x61\
\x74\x20\x74\x68\x65\x20\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\
\x63\x6f\x6e\x64\x69\x74\x69\x6f\x6e\x73\x20\x61\x72\x65\x0a\x2a\
\x2a\x20\x6d\x65\x74\x3a\x0a\x2a\x2a\x20\x20\x20\x2a\x20\x52\x65\
\x64\x69\x73\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x73\x20\x6f\x66\
\x20\x73\x6f\x75\x72\x63\x65\x20\x63\x6f\x64\x65\x20\x6d\x75\x73\
\x74\x20\x72\x65\x74\x61\x69\x6e\x20\x74\x68\x65\x20\x61\x62\x6f\
\x76\x65\x20\x63\x6f\x70\x79\x72\x69\x67\x68\x74\x0a\x2a\x2a\x20\
\x20\x20\x20\x20\x6e\x6f\x74\x69\x63\x65\x2c\x20\x74\x68\x69\x73\
\x20\x6c\x69\x73\x74\x20\x6f\x66\x20\x63\x6f\x6e\x64\x69\x74\x69\
\x6f\x6e\x73\x20\x61\x6e\x64\x20\x74\x68\x65\x20\x66\x6f\x6c\x6c\
\x6f\x77\x69\x6e\x67\x20\x64\x69\x73\x63\x6c\x61\x69\x6d\x65\x72\
\x2e\x0a\x2a\x2a\x20\x20\x20\x2a\x20\x52\x65\x64\x69\x73\x74\x72\
\x69\x62\x75\x74\x69\x6f\x6e\x73\x20\x69\x6e\x20\x62\x69\x6e\x61\
\x72\x79\x20\x66\x6f\x72\x6d\x20\x6d\x75\x73\x74\x20\x72\x65\x70\
\x72\x6f\x64\x75\x63\x65\x20\x74\x68\x65\x20\x61\x62\x6f\x76\x65\
\x20\x63\x6f\x70\x79\x72\x69\x67\x68\x74\x0a\x2a\x2a\x20\x20\x20\
\x20\x20\x6e\x6f\x74\x69\x63\x65\x2c\x20\x74\x68\x69\x73\x20\x6c\
\x69\x73\x74\x20\x6f\x66\x20\x63\x6f\x6e\x64\x69\x74\x69\x6f\x6e\
\x73\x20\x61\x6e\x64\x20\x74\x68\x65\x20\x66\x6f\x6c\x6c\x6f\x77\
\x69\x6e\x67\x20\x64\x69\x73\x63\x6c\x61\x69\x6d\x65\x72\x20\x69\
\x6e\x0a\x2a\x2a\x20\x20\x20\x20\x20\x74\x68\x65\x20\x64\x6f\x63\
\x75\x6d\x65\x6e\x74\x61\x74\x69\x6f\x6e\x20\x61\x6e\x64\x2f\x6f\
\x72\x20\x6f\x74\x68\x65\x72\x20\x6d\x61\x74\x65\x72\x69\x61\x6c\
\x73\x20\x70\x72\x6f\x76\x69\x64\x65\x64\x20\x77\x69\x74\x68\x20\
\x74\x68\x65\x0a\x2a\x2a\x20\x20\x20\x20\x20\x64\x69\x73\x74\x72\
\x69\x62\x75\x74\x69\x6f\x6e\x2e\x0a\x2a\x2a\x20\x20\x20\x2a\x20\
\x4e\x65\x69\x74\x68\x65\x72\x20\x74\x68\x65\x20\x6e\x61\x6d\x65\
\x20\x6f\x66\x20\x4e\x6f\x6b\x69\x61\x20\x43\x6f\x72\x70\x6f\x72\
\x61\x74\x69\x6f\x6e\x20\x61\x6e\x64\x20\x69\x74\x73\x20\x53\x75\
\x62\x73\x69\x64\x69\x61\x72\x79\x28\x2d\x69\x65\x73\x29\x20\x6e\
\x6f\x72\x0a\x2a\x2a\x20\x20\x20\x20\x20\x74\x68\x65\x20\x6e\x61\
\x6d\x65\x73\x20\x6f\x66\x20\x69\x74\x73\x20\x63\x6f\x6e\x74\x72\
\x69\x62\x75\x74\x6f\x72\x73\x20\x6d\x61\x79\x20\x62\x65\x20\x75\
\x73\x65\x64\x20\x74\x6f\x20\x65\x6e\x64\x6f\x72\x73\x65\x20\x6f\
\x72\x20\x70\x72\x6f\x6d\x6f\x74\x65\x0a\x2a\x2a\x20\x20\x20\x20\
\x20\x70\x72\x6f\x64\x75\x63\x74\x73\x20\x64\x65\x72\x69\x76\x65\
\x64\x20\x66\x72\x6f\x6d\x20\x74\x68\x69\x73\x20\x73\x6f\x66\x74\
\x77\x61\x72\x65\x20\x77\x69\x74\x68\x6f\x75\x74\x20\x73\x70\x65\
\x63\x69\x66\x69\x63\x20\x70\x72\x69\x6f\x72\x20\x77\x72\x69\x74\
\x74\x65\x6e\x0a\x2a\x2a\x20\x20\x20\x20\x20\x70\x65\x72\x6d\x69\
\x73\x73\x69\x6f\x6e\x2e\x0a\x2a\x2a\x0a\x2a\x2a\x20\x54\x48\x49\
\x53\x20\x53\x4f\x46\x54\x57\x41\x52\x45\x20\x49\x53\x20\x50\x52\
\x4f\x56\x49\x44\x45\x44\x20\x42\x59\x20\x54\x48\x45\x20\x43\x4f\
\x50\x59\x52\x49\x47\x48\x54\x20\x48\x4f\x4c\x44\x45\x52\x53\x20\
\x41\x4e\x44\x20\x43\x4f\x4e\x54\x52\x49\x42\x55\x54\x4f\x52\x53\
\x0a\x2a\x2a\x20\x22\x41\x53\x20\x49\x53\x22\x20\x41\x4e\x44\x20\
\x41\x4e\x59\x20\x45\x58\x50\x52\x45\x53\x53\x20\x4f\x52\x20\x49\
\x4d\x50\x4c\x49\x45\x44\x20\x57\x41\x52\x52\x41\x4e\x54\x49\x45\
\x53\x2c\x20\x49\x4e\x43\x4c\x55\x44\x49\x4e\x47\x2c\x20\x42\x55\
\x54\x20\x4e\x4f\x54\x0a\x2a\x2a\x20\x4c\x49\x4d\x49\x54\x45\x44\
\x20\x54\x4f\x2c\x20\x54\x48\x45\x20\x49\x4d\x50\x4c\x49\x45\x44\
\x20\x57\x41\x52\x52\x41\x4e\x54\x49\x45\x53\x20\x4f\x46\x20\x4d\
\x45\x52\x43\x48\x41\x4e\x54\x41\x42\x49\x4c\x49\x54\x59\x20\x41\
\x4e\x44\x20\x46\x49\x54\x4e\x45\x53\x53\x20\x46\x4f\x52\x0a\x2a\
\x2a\x20\x41\x20\x50\x41\x52\x54\x49\x43\x55\x4c\x41\x52\x20\x50\
\x55\x52\x50\x4f\x53\x45\x20\x41\x52\x45\x20\x44\x49\x53\x43\x4c\
\x41\x49\x4d\x45\x44\x2e\x20\x49\x4e\x20\x4e\x4f\x20\x45\x56\x45\
\x4e\x54\x20\x53\x48\x41\x4c\x4c\x20\x54\x48\x45\x20\x43\x4f\x50\
\x59\x52\x49\x47\x48\x54\x0a\x2a\x2a\x20\x4f\x57\x4e\x45\x52\x20\
\x4f\x52\x20\x43\x4f\x4e\x54\x52\x49\x42\x55\x54\x4f\x52\x53\x20\
\x42\x45\x20\x4c\x49\x41\x42\x4c\x45\x20\x46\x4f\x52\x20\x41\x4e\
\x59\x20\x44\x49\x52\x45\x43\x54\x2c\x20\x49\x4e\x44\x49\x52\x45\
\x43\x54\x2c\x20\x49\x4e\x43\x49\x44\x45\x4e\x54\x41\x4c\x2c\x0a\
\x2a\x2a\x20\x53\x50\x45\x43\x49\x41\x4c\x2c\x20\x45\x58\x45\x4d\
\x50\x4c\x41\x52\x59\x2c\x20\x4f\x52\x20\x43\x4f\x4e\x53\x45\x51\
\x55\x45\x4e\x54\x49\x41\x4c\x20\x44\x41\x4d\x41\x47\x45\x53\x20\
\x28\x49\x4e\x43\x4c\x55\x44\x49\x4e\x47\x2c\x20\x42\x55\x54\x20\
\x4e\x4f\x54\x0a\x2a\x2a\x20\x4c\x49\x4d\x49\x54\x45\x44\x20\x54\
\x4f\x2c\x20\x50\x52\x4f\x43\x55\x52\x45\x4d\x45\x4e\x54\x20\x4f\
\x46\x20\x53\x55\x42\x53\x54\x49\x54\x55\x54\x45\x20\x47\x4f\x4f\
\x44\x53\x20\x4f\x52\x20\x53\x45\x52\x56\x49\x43\x45\x53\x3b\x20\
\x4c\x4f\x53\x53\x20\x4f\x46\x20\x55\x53\x45\x2c\x0a\x2a\x2a\x20\
\x44\x41\x54\x41\x2c\x20\x4f\x52\x20\x50\x52\x4f\x46\x49\x54\x53\
\x3b\x20\x4f\x52\x20\x42\x55\x53\x49\x4e\x45\x53\x53\x20\x49\x4e\
\x54\x45\x52\x52\x55\x50\x54\x49\x4f\x4e\x29\x20\x48\x4f\x57\x45\
\x56\x45\x52\x20\x43\x41\x55\x53\x45\x44\x20\x41\x4e\x44\x20\x4f\
\x4e\x20\x41\x4e\x59\x0a\x2a\x2a\x20\x54\x48\x45\x4f\x52\x59\x20\
\x4f\x46\x20\x4c\x49\x41\x42\x49\x4c\x49\x54\x59\x2c\x20\x57\x48\
\x45\x54\x48\x45\x52\x20\x49\x4e\x20\x43\x4f\x4e\x54\x52\x41\x43\
\x54\x2c\x20\x53\x54\x52\x49\x43\x54\x20\x4c\x49\x41\x42\x49\x4c\
\x49\x54\x59\x2c\x20\x4f\x52\x20\x54\x4f\x52\x54\x0a\x2a\x2a\x20\
\x28\x49\x4e\x43\x4c\x55\x44\x49\x4e\x47\x20\x4e\x45\x47\x4c\x49\
\x47\x45\x4e\x43\x45\x20\x4f\x52\x20\x4f\x54\x48\x45\x52\x57\x49\
\x53\x45\x29\x20\x41\x52\x49\x53\x49\x4e\x47\x20\x49\x4e\x20\x41\
\x4e\x59\x20\x57\x41\x59\x20\x4f\x55\x54\x20\x4f\x46\x20\x54\x48\
\x45\x20\x55\x53\x45\x0a\x2a\x2a\x20\x4f\x46\x20\x54\x48\x49\x53\
\x20\x53\x4f\x46\x54\x57\x41\x52\x45\x2c\x20\x45\x56\x45\x4e\x20\
\x49\x46\x20\x41\x44\x56\x49\x53\x45\x44\x20\x4f\x46\x20\x54\x48\
\x45\x20\x50\x4f\x53\x53\x49\x42\x49\x4c\x49\x54\x59\x20\x4f\x46\
\x20\x53\x55\x43\x48\x20\x44\x41\x4d\x41\x47\x45\x2e\x22\x0a\x2a\
\x2a\x20\x24\x51\x54\x5f\x45\x4e\x44\x5f\x4c\x49\x43\x45\x4e\x53\
\x45\x24\x0a\x2a\x2a\x0a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\x2a\
\x2a\x2a\x2f\x0a\x0a\x69\x6d\x70\x6f\x72\x74\x20\x51\x74\x51\x75\
\x69\x63\x6b\x20\x31\x2e\x30\x0a\x0a\x2f\x2f\x21\x5b\x30\x5d\x0a\
\x4c\x69\x73\x74\x56\x69\x65\x77\x20\x7b\x0a\x20\x20\x20\x20\x77\
\x69\x64\x74\x68\x3a\x20\x31\x30\x30\x3b\x20\x68\x65\x69\x67\x68\
\x74\x3a\x20\x31\x30\x30\x0a\x20\x20\x20\x20\x61\x6e\x63\x68\x6f\
\x72\x73\x2e\x66\x69\x6c\x6c\x3a\x20\x70\x61\x72\x65\x6e\x74\x0a\
\x0a\x20\x20\x20\x20\x6d\x6f\x64\x65\x6c\x3a\x20\x6d\x79\x4d\x6f\
\x64\x65\x6c\x0a\x20\x20\x20\x20\x64\x65\x6c\x65\x67\x61\x74\x65\
\x3a\x20\x52\x65\x63\x74\x61\x6e\x67\x6c\x65\x20\x7b\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3a\x20\x32\x35\
\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x77\x69\x64\x74\x68\x3a\x20\
\x31\x30\x30\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6c\x6f\
\x72\x3a\x20\x6d\x6f\x64\x65\x6c\x2e\x6d\x6f\x64\x65\x6c\x44\x61\
\x74\x61\x2e\x63\x6f\x6c\x6f\x72\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x54\x65\x78\x74\x20\x7b\x20\x74\x65\x78\x74\x3a\x20\x6e\x61\
\x6d\x65\x20\x7d\x0a\x20\x20\x20\x20\x7d\x0a\x7d\x0a\x2f\x2f\x21\
\x5b\x30\x5d\x0a\
"
qt_resource_name = "\
\x00\x08\
\x0f\xca\x5b\xbc\
\x00\x76\
\x00\x69\x00\x65\x00\x77\x00\x2e\x00\x71\x00\x6d\x00\x6c\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    # Register the embedded resource blobs with Qt's resource system.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    # Unregister the resources registered by qInitResources().
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

# Auto-generated module (pyrcc output): resources are registered on import.
qInitResources()
| 57
| 96
| 0.727149
| 2,439
| 10,317
| 3.068471
| 0.051251
| 0.14992
| 0.17798
| 0.234099
| 0.667023
| 0.568813
| 0.525922
| 0.493453
| 0.427312
| 0.37961
| 0
| 0.40006
| 0.02336
| 10,317
| 180
| 97
| 57.316667
| 0.342696
| 0.017544
| 0
| 0.048485
| 0
| 0.890909
| 0
| 0
| 0
| 1
| 0.00079
| 0
| 0
| 1
| 0.012121
| false
| 0
| 0.006061
| 0
| 0.018182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b47ef3e80d6b7f715ea653b06064311987b035cd
| 40
|
py
|
Python
|
tingting.py
|
yaserqaziuva/cs3240-labdemo
|
bc17db0fc4107d0f4524fbc5eaf121a117a96409
|
[
"MIT"
] | null | null | null |
tingting.py
|
yaserqaziuva/cs3240-labdemo
|
bc17db0fc4107d0f4524fbc5eaf121a117a96409
|
[
"MIT"
] | null | null | null |
tingting.py
|
yaserqaziuva/cs3240-labdemo
|
bc17db0fc4107d0f4524fbc5eaf121a117a96409
|
[
"MIT"
] | null | null | null |
# Yaser Qazi (yq4du)
# Lab demo script: prints a single fixed string.
print("tingting")
| 10
| 20
| 0.675
| 5
| 40
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.15
| 40
| 3
| 21
| 13.333333
| 0.764706
| 0.45
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b48168930bbe2bb695b47d99581f63e11478b98f
| 129
|
py
|
Python
|
forex/admin.py
|
Shokr/Stocks-Screener
|
0b8da91da40b715beaf3a79163b1bdf6ea3be3b9
|
[
"MIT"
] | 1
|
2021-06-28T23:08:51.000Z
|
2021-06-28T23:08:51.000Z
|
forex/admin.py
|
Shokr/Stocks-Screener
|
0b8da91da40b715beaf3a79163b1bdf6ea3be3b9
|
[
"MIT"
] | 40
|
2020-03-06T10:24:55.000Z
|
2022-03-12T00:56:44.000Z
|
forex/admin.py
|
Shokr/Stocks-Screener
|
0b8da91da40b715beaf3a79163b1bdf6ea3be3b9
|
[
"MIT"
] | 3
|
2020-01-19T07:27:24.000Z
|
2021-09-11T10:09:25.000Z
|
from django.contrib import admin
# NOTE(review): module name 'froms' looks like a typo for 'forms' — confirm
# against the actual package layout before renaming.
from forex.froms import Currency

# Register your models here.
admin.site.register(Currency)  # Expose the Currency model in the Django admin.
| 14.333333
| 32
| 0.79845
| 18
| 129
| 5.722222
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 129
| 8
| 33
| 16.125
| 0.927928
| 0.20155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c30f073fb033a503d24e113794d48e97a857cd31
| 88
|
py
|
Python
|
slave/app/pulls/__init__.py
|
darksigma/traceless
|
eed3a35e90b8bbbf272e1f324e1c28de7afe08da
|
[
"MIT"
] | 1
|
2015-06-19T14:27:52.000Z
|
2015-06-19T14:27:52.000Z
|
slave/app/pulls/__init__.py
|
pratheeknagaraj/securechat
|
eed3a35e90b8bbbf272e1f324e1c28de7afe08da
|
[
"MIT"
] | null | null | null |
slave/app/pulls/__init__.py
|
pratheeknagaraj/securechat
|
eed3a35e90b8bbbf272e1f324e1c28de7afe08da
|
[
"MIT"
] | 1
|
2016-04-09T19:25:11.000Z
|
2016-04-09T19:25:11.000Z
|
from flask import Blueprint

# Blueprint grouping the 'pulls' endpoints.
pulls = Blueprint('pulls', __name__)

# Imported after the blueprint is created — presumably so route handlers in
# ``routes`` can attach to ``pulls`` without a circular import.
from . import routes
| 14.666667
| 36
| 0.761364
| 11
| 88
| 5.727273
| 0.636364
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 88
| 5
| 37
| 17.6
| 0.851351
| 0
| 0
| 0
| 0
| 0
| 0.056818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
c31bf247f1306151cec5627d0a7771767e7d85d7
| 2,351
|
py
|
Python
|
tests/permutils/test_stats.py
|
quintant/Permuta
|
4cdc7990e3dc298d0089ba8c48cd8967acd9b81f
|
[
"BSD-3-Clause"
] | 12
|
2015-09-09T02:40:50.000Z
|
2021-06-02T13:40:25.000Z
|
tests/permutils/test_stats.py
|
quintant/Permuta
|
4cdc7990e3dc298d0089ba8c48cd8967acd9b81f
|
[
"BSD-3-Clause"
] | 80
|
2015-12-17T15:00:17.000Z
|
2022-01-25T20:31:54.000Z
|
tests/permutils/test_stats.py
|
quintant/Permuta
|
4cdc7990e3dc298d0089ba8c48cd8967acd9b81f
|
[
"BSD-3-Clause"
] | 19
|
2015-12-16T13:16:10.000Z
|
2021-06-01T14:37:33.000Z
|
from permuta import Av
from permuta.permutils.statistics import PermutationStatistic
def test_distribution_all_perms():
    """Check statistic distributions over all permutations against
    hard-coded expected values: the flattened inversion distribution for
    lengths 0..7, and the major-index distribution for length 8.
    """
    # Each commented group below is the distribution for one permutation length.
    assert sum(PermutationStatistic.inv().distribution_up_to(7), []) == [
        1,  # n = 0
        1,  # n = 1
        1, 1,  # n = 2
        1, 2, 2, 1,  # n = 3
        1, 3, 5, 6, 5, 3, 1,  # n = 4
        1, 4, 9, 15, 20, 22, 20, 15, 9, 4, 1,  # n = 5
        1, 5, 14, 29, 49, 71, 90, 101, 101, 90, 71, 49, 29, 14, 5, 1,  # n = 6
        1, 6, 20, 49, 98, 169, 259, 359, 455, 531, 573, 573, 531, 455,
        359, 259, 169, 98, 49, 20, 6, 1,  # n = 7
    ]
    # Major-index distribution for length 8 (symmetric around its middle value).
    assert PermutationStatistic.maj().distribution_for_length(8) == [
        1, 7, 27, 76, 174, 343, 602, 961, 1415, 1940, 2493, 3017, 3450, 3736,
        3836,
        3736, 3450, 3017, 2493, 1940, 1415, 961, 602, 343, 174, 76, 27, 7, 1,
    ]
def test_distribution_av():
    """Check the flattened descent-statistic distribution restricted to the
    class Av(123) for lengths 0..11 against hard-coded expected values.
    """
    # Each commented group below is the distribution for one permutation length.
    assert sum(
        PermutationStatistic.des().distribution_up_to(11, Av.from_string("123")), []
    ) == [
        1,  # n = 0
        1,  # n = 1
        1, 1,  # n = 2
        0, 4, 1,  # n = 3
        0, 2, 11, 1,  # n = 4
        0, 0, 15, 26, 1,  # n = 5
        0, 0, 5, 69, 57, 1,  # n = 6
        0, 0, 0, 56, 252, 120, 1,  # n = 7
        0, 0, 0, 14, 364, 804, 247, 1,  # n = 8
        0, 0, 0, 0, 210, 1800, 2349, 502, 1,  # n = 9
        0, 0, 0, 0, 42, 1770, 7515, 6455, 1013, 1,  # n = 10
        0, 0, 0, 0, 0, 792, 11055, 27940, 16962, 2036, 1,  # n = 11
    ]
| 13.282486
| 84
| 0.265844
| 206
| 2,351
| 2.975728
| 0.417476
| 0.052202
| 0.044046
| 0.032626
| 0.02447
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395709
| 0.643131
| 2,351
| 176
| 85
| 13.357955
| 0.334923
| 0
| 0
| 0.77907
| 0
| 0
| 0.001276
| 0
| 0
| 0
| 0
| 0
| 0.017442
| 1
| 0.011628
| true
| 0
| 0.011628
| 0
| 0.023256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c33ae417974841443f9d3d480f417e86768f558d
| 292
|
py
|
Python
|
own_blockchain_sdk/__init__.py
|
Muncan90/OwnBlockchainSdkPython
|
730b519eca99629d6ee4b006d70b5fde7e7031fa
|
[
"MIT"
] | 1
|
2020-06-23T17:03:03.000Z
|
2020-06-23T17:03:03.000Z
|
own_blockchain_sdk/__init__.py
|
Muncan90/OwnBlockchainSdkPython
|
730b519eca99629d6ee4b006d70b5fde7e7031fa
|
[
"MIT"
] | null | null | null |
own_blockchain_sdk/__init__.py
|
Muncan90/OwnBlockchainSdkPython
|
730b519eca99629d6ee4b006d70b5fde7e7031fa
|
[
"MIT"
] | 1
|
2020-07-09T04:09:20.000Z
|
2020-07-09T04:09:20.000Z
|
from own_blockchain_sdk.crypto import encode64, decode64, encode58, decode58, \
hash, derive_hash, \
generate_wallet, address_from_private_key, wallet_from_private_key, Wallet, sign_message, sign_plain_text, verify_plain_text_signature
from own_blockchain_sdk.transactions import Tx
| 48.666667
| 138
| 0.839041
| 40
| 292
| 5.675
| 0.625
| 0.061674
| 0.14978
| 0.176211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030651
| 0.106164
| 292
| 5
| 139
| 58.4
| 0.83908
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c33e00e451f84e278456f18bebb719f3dcb4ceba
| 135
|
py
|
Python
|
fusion/criterion/mi_estimator/critic/base_critic.py
|
Mrinal18/fusion
|
34e563f2e50139385577c3880c5de11f8a73f220
|
[
"BSD-3-Clause"
] | 14
|
2021-04-05T01:25:12.000Z
|
2022-02-17T19:44:28.000Z
|
fusion/criterion/mi_estimator/critic/base_critic.py
|
Mrinal18/fusion
|
34e563f2e50139385577c3880c5de11f8a73f220
|
[
"BSD-3-Clause"
] | 1
|
2021-07-05T08:32:49.000Z
|
2021-07-05T12:34:57.000Z
|
fusion/criterion/mi_estimator/critic/base_critic.py
|
Mrinal18/fusion
|
34e563f2e50139385577c3880c5de11f8a73f220
|
[
"BSD-3-Clause"
] | 1
|
2022-02-01T21:56:11.000Z
|
2022-02-01T21:56:11.000Z
|
import abc
from torch import Tensor
class ABaseCritic(abc.ABC):
    """Base class for critics taking two tensors and producing a score tensor.

    NOTE(review): ``__call__`` is not marked ``@abc.abstractmethod`` and its
    body is ``pass`` (returns None) — presumably subclasses are expected to
    override it; confirm whether non-overriding subclasses should be
    prevented from instantiating.
    """
    def __call__(self, x: Tensor, y: Tensor) -> Tensor:
        # Placeholder: intended to map representations (x, y) to a critic score.
        pass
| 16.875
| 55
| 0.674074
| 19
| 135
| 4.578947
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22963
| 135
| 7
| 56
| 19.285714
| 0.836538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
c35955a44ee17802df94f2fa3143422f91ff3d81
| 638
|
py
|
Python
|
test/tests/dict.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | 1
|
2020-11-26T23:37:19.000Z
|
2020-11-26T23:37:19.000Z
|
test/tests/dict.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | null | null | null |
test/tests/dict.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | null | null | null |
# output: ok

# Exercise basic dict operations — insert, membership, lookup, delete —
# first with int keys, then with str keys.

a = {}
assert len(a) == 0

# Integer keys: fill, verify, delete every even key.
for n in range(0, 100):
    a[n] = n * 2
assert len(a) == 100
for n in range(0, 100):
    assert n in a
    assert a[n] == n * 2
assert 101 not in a

for n in range(0, 100, 2):
    del a[n]
assert len(a) == 50
for n in range(0, 100):
    assert (n in a) == ((n % 2) != 0)

# String keys: same sequence of operations.
a = {}
for n in range(0, 100):
    a[str(n)] = n
assert len(a) == 100
for n in range(0, 100):
    key = str(n)
    assert key in a
    assert a[key] == n
assert '101' not in a

for n in range(0, 100, 2):
    del a[str(n)]
assert len(a) == 50
for n in range(0, 100):
    assert (str(n) in a) == ((n % 2) != 0)

print('ok')
| 16.789474
| 42
| 0.510972
| 136
| 638
| 2.397059
| 0.139706
| 0.101227
| 0.147239
| 0.269939
| 0.791411
| 0.769939
| 0.739264
| 0.638037
| 0.638037
| 0.638037
| 0
| 0.124183
| 0.280564
| 638
| 37
| 43
| 17.243243
| 0.586057
| 0.015674
| 0
| 0.482759
| 0
| 0
| 0.007987
| 0
| 0
| 0
| 0
| 0
| 0.448276
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c37008ebbba38d989e1e9087d8f9d9d569f9d1e6
| 14,145
|
py
|
Python
|
codes/models/modules/Subnet_constructor.py
|
lin-zhao-resoLve/Symmetric-Enhancement
|
11c1a662020582d1333d11cf5f9c99556ec0f427
|
[
"Apache-2.0"
] | 14
|
2021-09-30T07:05:04.000Z
|
2022-03-31T08:22:39.000Z
|
codes/models/modules/Subnet_constructor.py
|
lin-zhao-resoLve/Symmetric-Enhancement
|
11c1a662020582d1333d11cf5f9c99556ec0f427
|
[
"Apache-2.0"
] | 3
|
2021-11-09T06:52:13.000Z
|
2021-11-20T08:00:46.000Z
|
codes/models/modules/Subnet_constructor.py
|
lin-zhao-resoLve/Symmetric-Enhancement
|
11c1a662020582d1333d11cf5f9c99556ec0f427
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.modules.module_util as mutil
# from MPNCOV.python import MPNCOV
class DenseBlock(nn.Module):
    """Densely connected 5-layer conv block: each 3x3 conv consumes the
    concatenation of the block input and all previous feature maps.

    :param channel_in: number of input channels.
    :param channel_out: number of output channels of the final conv.
    :param init: 'xavier' selects Xavier init for conv1-4; any other value
        uses ``mutil.initialize_weights``.
    :param gc: growth channels produced by each intermediate conv.
    :param bias: whether the convolutions use a bias term.
    """

    def __init__(self, channel_in, channel_out, init='xavier', gc=32, bias=True):
        super(DenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(channel_in + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(channel_in + 4 * gc, channel_out, 3, 1, 1, bias=bias)
        # self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.prelu = nn.PReLU(num_parameters=1, init=0.2)

        if init == 'xavier':
            mutil.initialize_weights_xavier([self.conv1, self.conv2, self.conv3, self.conv4], 0.1)
        else:
            mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4], 0.1)
        # Final conv initialized with scale 0 — presumably so the block
        # starts as a near-identity; confirm against mutil.initialize_weights.
        mutil.initialize_weights(self.conv5, 0)

    def forward(self, x):
        # Each stage sees the input plus every earlier stage's output.
        x1 = self.prelu(self.conv1(x))
        x2 = self.prelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.prelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.prelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        # No activation on the last conv.
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        return x5
class FBBlock(nn.Module):
    """Conv block with interleaved skip connections: odd stages mix the
    block input with earlier even-stage outputs, even stages mix earlier
    odd-stage outputs; a final 1x1 conv fuses the even-stage features.

    :param channel_in: number of input channels.
    :param channel_out: number of output channels of the final 1x1 conv.
    :param init: 'xavier' selects Xavier init for conv1-6; any other value
        uses ``mutil.initialize_weights``.
    :param gc: number of channels produced by each intermediate conv.
    :param bias: whether the 3x3 convolutions use a bias term.
    """

    def __init__(self, channel_in, channel_out, init='xavier', gc=64, bias=True):
        super(FBBlock, self).__init__()
        self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(2*gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv6 = nn.Conv2d(3 * gc, gc, 3, 1, 1, bias=bias)
        # 1x1 fusion conv over the three even-stage feature maps.
        self.conv7 = nn.Conv2d(3 * gc, channel_out, 1)
        # self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.prelu = nn.PReLU(num_parameters=1, init=0.2)

        if init == 'xavier':
            mutil.initialize_weights_xavier([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6], 0.1)
        else:
            mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6], 0.1)
        # Final conv initialized with scale 0 (same convention as DenseBlock).
        mutil.initialize_weights(self.conv7, 0)

    def forward(self, x):
        x1 = self.prelu(self.conv1(x))
        x2 = self.prelu(self.conv2(x1))
        # Odd stages concatenate the input with earlier even-stage outputs;
        # even stages concatenate earlier odd-stage outputs.
        x3 = self.prelu(self.conv3(torch.cat((x, x2), 1)))
        x4 = self.prelu(self.conv4(torch.cat((x1, x3), 1)))
        x5 = self.prelu(self.conv5(torch.cat((x, x2, x4), 1)))
        x6 = self.prelu(self.conv6(torch.cat((x1, x3, x5), 1)))
        # No activation on the final 1x1 fusion.
        x7 = self.conv7(torch.cat((x2, x4, x6), 1))
        return x7
## second-order Channel attention (SOCA)
class SOCA(nn.Module):
    """Channel attention module.

    NOTE(review): despite the SOCA (second-order channel attention) name,
    this implementation uses plain global average pooling followed by a
    squeeze-and-excitation style bottleneck — the covariance-pooling import
    (MPNCOV) is commented out at the top of the file, so the attention
    computed here is first-order.

    :param channel: number of input/output channels.
    :param reduction: bottleneck reduction ratio for the gating convs.
    """

    def __init__(self, channel, reduction=16):
        super(SOCA, self).__init__()
        # global average pooling: feature --> point
        # self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # feature channel downscale and upscale --> channel weight
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.PReLU(num_parameters=1, init=0.2),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
            # nn.BatchNorm2d(channel)
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        # Squeeze spatial dims, compute per-channel gates in (0, 1) via the
        # bottleneck + sigmoid, then rescale the input channel-wise.
        y = self.avg_pool(x).view(b, c, 1, 1)
        y = self.conv_du(y).view(b, c, 1, 1)
        return x * y.expand_as(x)
class ResidualBlock_noBN_S0(nn.Module):
'''Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
'''
def __init__(self, nf=64):
super(ResidualBlock_noBN_S0, self).__init__()
self.conv1 = nn.Conv2d(nf, nf*2, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(nf*2, nf, 3, 1, 1, bias=True)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
self.so = (SOCA(nf))
# initialization
mutil.initialize_weights([self.conv1, self.conv2, self.so], 0.1)
def forward(self, x):
identity = x
out = self.prelu(self.conv1(x))
out = self.so(self.conv2(out))
return identity + out
class ResidualBlock_AT(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=64, bias=True):
super(ResidualBlock_AT, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
# self.res_list = nn.ModuleList([mutil.ResidualBlock_noBN(gc) for _ in range(3)])
self.res1 = ResidualBlock_noBN_S0(gc)
self.res2 = ResidualBlock_noBN_S0(gc)
self.res3 = ResidualBlock_noBN_S0(gc)
self.conv2 = nn.Conv2d(gc, channel_out, 3, 1, 1, bias=bias)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
self.soca = (SOCA(gc))
if init == 'xavier':
mutil.initialize_weights_xavier([self.conv1], 0.1)
else:
mutil.initialize_weights([self.conv1], 0.1)
mutil.initialize_weights(self.conv2, 0)
def forward(self, x):
x1 = self.prelu(self.conv1(x))
x2 = self.res1(x1)
x3 = self.res2(x2)
x4 = self.res3(x3)
x5 = self.conv2(x4)
return x5
class ResidualBlock_AT_skip(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=64, bias=True):
super(ResidualBlock_AT_skip, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
# self.res_list = nn.ModuleList([mutil.ResidualBlock_noBN(gc) for _ in range(3)])
self.res1 = ResidualBlock_noBN_S0(gc)
self.res2 = ResidualBlock_noBN_S0(gc)
self.res3 = ResidualBlock_noBN_S0(gc)
self.conv2 = nn.Conv2d(gc, channel_out, 3, 1, 1, bias=bias)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
self.soca = (SOCA(gc))
if init == 'xavier':
mutil.initialize_weights_xavier([self.conv1], 0.1)
else:
mutil.initialize_weights([self.conv1], 0.1)
mutil.initialize_weights(self.conv2, 0)
def forward(self, x):
x1 = self.prelu(self.conv1(x))
x2 = self.res1(x1)
x3 = self.res2(x2+x1)
x4 = self.res3(x3+x2+x1)
x5 = self.conv2(x4)
return x5
class SELayer(nn.Module):
    """Squeeze-and-Excitation: global-average-pool to (b, c), pass through a
    bottleneck MLP ending in a sigmoid, and rescale the input channel-wise."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.PReLU(num_parameters=1, init=0.2),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        gate = self.fc(self.avg_pool(x).view(b, c)).view(b, c, 1, 1)
        return x * gate.expand_as(x)
class ResidualBlock_noBN_SE(nn.Module):
'''Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
'''
def __init__(self, nf=64):
super(ResidualBlock_noBN_SE, self).__init__()
self.conv1 = nn.Conv2d(nf, nf*2, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(nf*2, nf, 3, 1, 1, bias=True)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
self.se = SELayer(nf)
# initialization
mutil.initialize_weights([self.conv1, self.conv2, self.se], 0.1)
def forward(self, x):
identity = x
out = self.prelu(self.conv1(x))
out = self.se(self.conv2(out))
return identity + out
class ResidualBlock_SE(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=64, bias=True):
super(ResidualBlock_SE, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
self.res1 = ResidualBlock_noBN_SE(gc)
self.res2 = ResidualBlock_noBN_SE(gc)
self.res3 = ResidualBlock_noBN_SE(gc)
self.conv2 = nn.Conv2d(gc, channel_out, 3, 1, 1, bias=bias)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
self.se = SELayer(gc)
if init == 'xavier':
mutil.initialize_weights_xavier([self.conv1], 0.1)
else:
mutil.initialize_weights([self.conv1], 0.1)
mutil.initialize_weights(self.conv2, 0)
def forward(self, x):
x1 = self.prelu(self.conv1(x))
x2 = self.res1(x1)
x3 = self.res2(x2)
x4 = self.res3(x3)
x5 = self.conv2(x4)
return x5
class atmLayer(nn.Module):
def __init__(self, channel=6):
super(atmLayer, self).__init__()
self.fc = nn.Sequential(
nn.Conv2d(channel, 64, 3, 1, 1),
nn.PReLU(num_parameters=1, init=0.2),
nn.Conv2d(64, 64, 3, 1, 1),
nn.PReLU(num_parameters=1, init=0.2),
nn.Conv2d(64, 1, 1, 1)
)
mutil.initialize_weights([self.fc], 0.1)
def forward(self, x):
x = self.fc(x)
return x
class ResidualBlock_atm(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=64, bias=True):
super(ResidualBlock_atm, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
self.res1 = mutil.ResidualBlock_noBN(gc)
self.map1 = atmLayer(channel_in)
self.res2 = mutil.ResidualBlock_noBN(gc)
self.map2 = atmLayer(channel_in)
self.res3 = mutil.ResidualBlock_noBN(gc)
self.map3 = atmLayer(channel_in)
self.conv2 = nn.Conv2d(gc, channel_out, 3, 1, 1, bias=bias)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
if init == 'xavier':
mutil.initialize_weights_xavier([self.conv1], 0.1)
else:
mutil.initialize_weights([self.conv1], 0.1)
mutil.initialize_weights(self.conv2, 0)
def forward(self, x):
x1 = self.prelu(self.conv1(x))
x2 = self.res1(x1) * self.map1(x)
x3 = self.res2(x2) * self.map2(x)
x4 = self.res3(x3) * self.map3(x)
x5 = self.conv2(x4)
return x5
class ResidualBlock(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=64, bias=True):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
self.res1 = mutil.ResidualBlock_noBN(gc)
self.res2 = mutil.ResidualBlock_noBN(gc)
self.res3 = mutil.ResidualBlock_noBN(gc)
self.conv2 = nn.Conv2d(gc, channel_out, 3, 1, 1, bias=bias)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
if init == 'xavier':
mutil.initialize_weights_xavier([self.conv1], 0.1)
else:
mutil.initialize_weights([self.conv1], 0.1)
mutil.initialize_weights(self.conv2, 0)
def forward(self, x):
x1 = self.prelu(self.conv1(x))
x2 = self.res1(x1)
x3 = self.res2(x2)
x4 = self.res3(x3)
x5 = self.conv2(x4)
return x5
class ResidualNet(nn.Module):
'''Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
'''
def __init__(self, channel_in, channel_out, init='xavier', nf=64, bias=True):
super(ResidualNet, self).__init__()
self.conv1 = nn.Conv2d(channel_in, nf, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv4 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv5 = nn.Conv2d(nf, channel_out, 3, 1, 1, bias=True)
self.prelu = nn.PReLU(num_parameters=1, init=0.2)
# initialization
if init == 'xavier':
mutil.initialize_weights_xavier([self.conv1, self.conv2, self.conv3, self.conv4], 0.1)
else:
mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4], 0.1)
mutil.initialize_weights(self.conv5, 0)
def forward(self, x):
identity = x
out = self.prelu(self.conv1(x))
out = self.prelu(self.conv2(out))
out = self.prelu(self.conv3(out))
out = self.prelu(self.conv4(out))
out = self.conv5(out)
return identity + out
def subnet(net_structure, init='xavier'):
    """Build a constructor for one of the named sub-network blocks.

    Args:
        net_structure: block family name — one of 'DBNet', 'ResNet',
            'ResAT2Net', 'ResAT2Net_skip', 'ResNet_SE', 'ResNet_atm', 'FBNet'.
        init: weight-initialisation scheme forwarded to the block
            ('xavier' selects xavier init; any other value selects the
            blocks' non-xavier scheme).

    Returns:
        A callable ``constructor(channel_in, channel_out)`` that builds the
        requested block, or returns None for an unknown ``net_structure``
        (matching the original contract).
    """
    def constructor(channel_in, channel_out):
        # BUG FIX: the original only forwarded `init` when it was 'xavier';
        # any other value was dropped, so every block silently fell back to
        # its default init ('xavier'). Forward it unconditionally.
        if net_structure == 'DBNet':
            return DenseBlock(channel_in, channel_out, init)
        elif net_structure == 'ResNet':
            return ResidualNet(channel_in, channel_out, init)
        elif net_structure == 'ResAT2Net':
            return ResidualBlock_AT(channel_in, channel_out, init)
        elif net_structure == 'ResAT2Net_skip':
            return ResidualBlock_AT_skip(channel_in, channel_out, init)
        elif net_structure == 'ResNet_SE':
            return ResidualBlock_SE(channel_in, channel_out, init)
        elif net_structure == 'ResNet_atm':
            return ResidualBlock_atm(channel_in, channel_out, init)
        elif net_structure == 'FBNet':
            return FBBlock(channel_in, channel_out, init)
        else:
            return None
    return constructor
| 39.401114
| 122
| 0.594344
| 1,980
| 14,145
| 4.065152
| 0.072222
| 0.044726
| 0.011927
| 0.02609
| 0.846068
| 0.811032
| 0.775624
| 0.72158
| 0.650764
| 0.607902
| 0
| 0.052931
| 0.266737
| 14,145
| 358
| 123
| 39.511173
| 0.723101
| 0.055709
| 0
| 0.566553
| 0
| 0
| 0.015202
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095563
| false
| 0
| 0.013652
| 0
| 0.25256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5ee434cb8445c7b25537aee5cb2b09397197a2d1
| 136
|
py
|
Python
|
fooof/__init__.py
|
TheCheeseToast/fooof
|
f3f8422af7d87fa73772e083deaf8439ca59908d
|
[
"Apache-2.0"
] | null | null | null |
fooof/__init__.py
|
TheCheeseToast/fooof
|
f3f8422af7d87fa73772e083deaf8439ca59908d
|
[
"Apache-2.0"
] | null | null | null |
fooof/__init__.py
|
TheCheeseToast/fooof
|
f3f8422af7d87fa73772e083deaf8439ca59908d
|
[
"Apache-2.0"
] | null | null | null |
"""FOOOF - Fitting Oscillations & One-Over F"""
from .version import __version__
from .fit import FOOOF
from .group import FOOOFGroup
| 19.428571
| 47
| 0.757353
| 18
| 136
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154412
| 136
| 6
| 48
| 22.666667
| 0.86087
| 0.301471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5eed9366ec865e29912043c56d4f2b7c967f8724
| 177
|
py
|
Python
|
Android/parser/webserver/test.py
|
Bravest-Ptt/Useful-Shell
|
75016ff44f218afce6b885af7b23fb801a7ef2d4
|
[
"Apache-2.0"
] | 1
|
2020-05-31T08:46:45.000Z
|
2020-05-31T08:46:45.000Z
|
Android/parser/webserver/test.py
|
Bravest-Ptt/Useful-Shell
|
75016ff44f218afce6b885af7b23fb801a7ef2d4
|
[
"Apache-2.0"
] | null | null | null |
Android/parser/webserver/test.py
|
Bravest-Ptt/Useful-Shell
|
75016ff44f218afce6b885af7b23fb801a7ef2d4
|
[
"Apache-2.0"
] | null | null | null |
# Developer convenience script: open a hard-coded websocket test page in gedit.
import subprocess
# NOTE(review): shell=True with a single string command; the path is
# hard-coded (no untrusted input here), but a list argv with shell=False
# would be the safer, more portable form. The .encode('utf-8') is POSIX-only.
sys_command = "gedit /home/qinsw/pengtian/shell/Useful-Shell/Android/parser/html/html/websocket.html".encode('utf-8')
# Fire-and-forget: the Popen handle is not kept, so the editor is not waited on.
subprocess.Popen(sys_command, shell=True)
| 59
| 117
| 0.813559
| 26
| 177
| 5.461538
| 0.730769
| 0.140845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005882
| 0.039548
| 177
| 3
| 118
| 59
| 0.829412
| 0
| 0
| 0
| 0
| 0.333333
| 0.505618
| 0.44382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5eef24c797dbd4502e427473033368654c5319fe
| 38,133
|
py
|
Python
|
api/tests/test_views.py
|
posm/osm-export-tool2
|
5a1f4096f1afbe7420363376e6e1e8d42e47e1d1
|
[
"BSD-3-Clause"
] | 2
|
2018-08-31T18:30:28.000Z
|
2018-11-27T01:50:06.000Z
|
api/tests/test_views.py
|
posm/osm-export-tool2
|
5a1f4096f1afbe7420363376e6e1e8d42e47e1d1
|
[
"BSD-3-Clause"
] | null | null | null |
api/tests/test_views.py
|
posm/osm-export-tool2
|
5a1f4096f1afbe7420363376e6e1e8d42e47e1d1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import os
import uuid
from unittest import skip
from mock import patch
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.core.files import File
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from api.pagination import LinkHeaderPagination
from jobs.models import ExportConfig, ExportFormat, ExportProfile, Job
from tasks.models import ExportRun, ExportTask
logger = logging.getLogger(__name__)
class TestJobViewSet(APITestCase):
    """API tests for the jobs endpoints (create, detail, delete, validation).

    FIX: the deprecated ``assertEquals`` aliases (removed in Python 3.12)
    are replaced with ``assertEqual`` throughout; loop variables no longer
    shadow the ``format`` builtin.
    """

    def setUp(self, ):
        """Create a user, a job with the OBF format, a preset config and tags."""
        self.path = os.path.dirname(os.path.realpath(__file__))
        self.group = Group.objects.create(name='TestDefaultExportExtentGroup')
        # Profile constrains the maximum export extent for the group.
        ExportProfile.objects.create(
            name='DefaultExportProfile',
            max_extent=2500000,
            group=self.group
        )
        self.user = User.objects.create_user(
            username='demo', email='demo@demo.com', password='demo'
        )
        extents = (-3.9, 16.1, 7.0, 27.6)
        bbox = Polygon.from_bbox(extents)
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob', event='Test Activation',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        obf_format = ExportFormat.objects.get(slug='obf')
        self.job.formats.add(obf_format)
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        # create a test config
        f = File(open(self.path + '/files/hdm_presets.xml'))
        filename = f.name.split('/')[-1]
        self.config = ExportConfig.objects.create(name='Test Preset Config', filename=filename, upload=f, config_type='PRESET', user=self.user)
        f.close()
        self.assertIsNotNone(self.config)
        self.job.configs.add(self.config)
        self.tags = [
            {
                "name": "Telecommunication office",
                "key": "office", "value": "telecommunication",
                "data_model": "HDM",
                "geom_types": ["point", "polygon"],
                "groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
            },
            {
                "name": "Radio or TV Studio",
                "key": "amenity", "value": "studio",
                "data_model": "OSM",
                "geom_types": ["point", "polygon"],
                "groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
            },
            {
                "name": "Telecommunication antenna",
                "key": "man_made", "value": "tower",
                "data_model": "OSM",
                "geom_types": ["point", "polygon"],
                "groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
            },
            {
                "name": "Telecommunication company retail office",
                "key": "office", "value": "telecommunication",
                "data_model": "OSM",
                "geom_types": ["point", "polygon"],
                "groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
            }
        ]

    def tearDown(self,):
        """Delete the uploaded preset config created in setUp."""
        self.config.delete()  # clean up

    def test_list(self, ):
        """The jobs list route resolves to /api/jobs."""
        expected = '/api/jobs'
        url = reverse('api:jobs-list')
        self.assertEqual(expected, url)

    def test_get_job_detail(self, ):
        """GET on a job detail returns headers and significant fields."""
        expected = '/api/jobs/{0}'.format(self.job.uid)
        url = reverse('api:jobs-detail', args=[self.job.uid])
        self.assertEqual(expected, url)
        data = {"uid": str(self.job.uid),
                "name": "Test",
                "url": 'http://testserver{0}'.format(url),
                "description": "Test Description",
                "exports": [{"uid": "8611792d-3d99-4c8f-a213-787bc7f3066",
                             "url": "http://testserver/api/formats/obf",
                             "name": "OBF Format",
                             "description": "OSMAnd OBF Export Format."}],
                "created_at": "2015-05-21T19:46:37.163749Z",
                "updated_at": "2015-05-21T19:46:47.207111Z",
                "status": "SUCCESS"}
        response = self.client.get(url)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        # test significant content
        self.assertEqual(response.data['uid'], data['uid'])
        self.assertEqual(response.data['url'], data['url'])
        self.assertEqual(response.data['exports'][0]['url'], data['exports'][0]['url'])

    def test_delete_job(self, ):
        """The job owner can delete the job (204, empty body)."""
        url = reverse('api:jobs-detail', args=[self.job.uid])
        response = self.client.delete(url)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(response['Content-Length'], '0')
        self.assertEqual(response['Content-Language'], 'en')

    def test_delete_no_permissions(self, ):
        """A different user cannot delete someone else's job (403)."""
        url = reverse('api:jobs-detail', args=[self.job.uid])
        # create another user with token
        user = User.objects.create_user(
            username='other_user', email='other_user@demo.com', password='demo'
        )
        token = Token.objects.create(user=user)
        # reset the client credentials to the new user
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        # try to delete a job belonging to self.user
        response = self.client.delete(url)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @patch('api.views.ExportTaskRunner')
    def test_create_job_success(self, mock):
        """POSTing a valid job queues a task and echoes the job back."""
        task_runner = mock.return_value
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        config_uid = self.config.uid
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats,
            'preset': config_uid,
            'published': True,
            'tags': self.tags
        }
        response = self.client.post(url, request_data, format='json')
        job_uid = response.data['uid']
        # test the ExportTaskRunner.run_task(job_id) method gets called.
        task_runner.run_task.assert_called_once_with(job_uid=job_uid)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        # test significant response content
        self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
        self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
        self.assertEqual(response.data['name'], request_data['name'])
        self.assertEqual(response.data['description'], request_data['description'])
        self.assertTrue(response.data['published'])
        # check we have the correct tags
        job = Job.objects.get(uid=job_uid)
        tags = job.tags.all()
        self.assertIsNotNone(tags)
        self.assertEqual(233, len(tags))

    @patch('api.views.ExportTaskRunner')
    def test_create_job_with_config_success(self, mock):
        """POSTing a job with a preset config succeeds and is unpublished."""
        task_runner = mock.return_value
        config_uid = self.config.uid
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats,
            'preset': config_uid,
            'transform': '',
            'translation': ''
        }
        response = self.client.post(url, request_data, format='json')
        job_uid = response.data['uid']
        # test the ExportTaskRunner.run_task(job_id) method gets called.
        task_runner.run_task.assert_called_once_with(job_uid=job_uid)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        # test significant response content
        self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
        self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
        self.assertEqual(response.data['name'], request_data['name'])
        self.assertEqual(response.data['description'], request_data['description'])
        self.assertFalse(response.data['published'])
        configs = self.job.configs.all()
        self.assertIsNotNone(configs[0])

    @patch('api.views.ExportTaskRunner')
    def test_create_job_with_tags(self, mock):
        """POSTing a job with inline JSON tags succeeds."""
        # delete the existing tags and test adding them with json
        self.job.tags.all().delete()
        task_runner = mock.return_value
        config_uid = self.config.uid
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats,
            # 'preset': config_uid,
            'transform': '',
            'translate': '',
            'tags': self.tags
        }
        response = self.client.post(url, request_data, format='json')
        job_uid = response.data['uid']
        # test the ExportTaskRunner.run_task(job_id) method gets called.
        task_runner.run_task.assert_called_once_with(job_uid=job_uid)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        # test significant response content
        self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
        self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
        self.assertEqual(response.data['name'], request_data['name'])
        self.assertEqual(response.data['description'], request_data['description'])
        configs = self.job.configs.all()
        # self.assertIsNotNone(configs[0])

    def test_missing_bbox_param(self, ):
        """Omitting xmin yields a 400 with a field error."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            # 'xmin': -3.9, missing
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['xmin is required.'], response.data['xmin'])

    def test_invalid_bbox_param(self, ):
        """An empty xmin value yields a 400 with a field error."""
        url = reverse('api:jobs-list')
        formats = [str(fmt.uid) for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': '',  # empty
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data, format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['invalid xmin value.'], response.data['xmin'])

    def test_invalid_bbox(self, ):
        """A degenerate bbox (xmin == xmax) yields an invalid_bounds error."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': 7.0,  # invalid
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['invalid_bounds'], response.data['id'])

    def test_lat_lon_bbox(self, ):
        """An out-of-range longitude yields a min-value validation error."""
        url = reverse('api:jobs-list')
        formats = [str(fmt.uid) for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -227.14,  # invalid
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(["Ensure this value is greater than or equal to -180."], response.data['xmin'])

    def test_coord_nan(self, ):
        """A non-numeric coordinate yields a 400 with a field error."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': 'xyz',  # invalid
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['invalid xmin value.'], response.data['xmin'])

    def test_inverted_coords(self, ):
        """Swapped min/max coordinates yield an inverted_coordinates error."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': 7.0,  # inverted
            'ymin': 16.1,
            'xmax': -3.9,  # inverted
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['inverted_coordinates'], response.data['id'])

    def test_empty_string_param(self, ):
        """A blank description yields a 400 with a field error."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        request_data = {
            'name': 'TestJob',
            'description': '',  # empty
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['This field may not be blank.'], response.data['description'])

    def test_missing_format_param(self, ):
        """Omitting formats yields a 400 with a field error."""
        url = reverse('api:jobs-list')
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            # 'formats': '', # missing
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['Select an export format.'], response.data['formats'])

    def test_invalid_format_param(self, ):
        """An empty formats value yields a 400 with a formats error."""
        url = reverse('api:jobs-list')
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': '',  # invalid
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertIsNotNone(response.data['formats'])

    def test_no_matching_format_slug(self, ):
        """Unknown format slugs yield a 400 with a formats error."""
        url = reverse('api:jobs-list')
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -3.9,
            'ymin': 16.1,
            'xmax': 7.0,
            'ymax': 27.6,
            'formats': ['broken-format-one', 'broken-format-two']
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(response.data['formats'], ['invalid export format.'])

    @patch('api.views.ExportTaskRunner')
    def test_get_correct_region(self, mock):
        """A job spanning two regions is assigned the one it overlaps most."""
        task_runner = mock.return_value
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        # job extent spans africa / asia but greater intersection with asia
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': 36.90,
            'ymin': 13.54,
            'xmax': 48.52,
            'ymax': 20.24,
            'formats': formats
        }
        response = self.client.post(url, request_data, format='json')
        job_uid = response.data['uid']
        # test the ExportTaskRunner.run_task(job_id) method gets called.
        task_runner.run_task.assert_called_once_with(job_uid=job_uid)
        # test the response headers
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        # test significant response content
        self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
        self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
        self.assertEqual(response.data['name'], request_data['name'])
        self.assertEqual(response.data['description'], request_data['description'])
        # test the region
        region = response.data['region']
        self.assertIsNotNone(region)
        self.assertEqual(region['name'], 'Central Asia/Middle East')

    def test_invalid_region(self, ):
        """A job outside all regions yields an invalid_region error."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        # job outside any region
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': 2.74,
            'ymin': 47.66,
            'xmax': 11.61,
            'ymax': 54.24,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['invalid_region'], response.data['id'])

    def test_extents_too_large(self, ):
        """A job exceeding the profile's max extent yields invalid_extents."""
        url = reverse('api:jobs-list')
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        # job outside any region
        request_data = {
            'name': 'TestJob',
            'description': 'Test description',
            'event': 'Test Activation',
            'xmin': -40,
            'ymin': -10,
            'xmax': 40,
            'ymax': 20,
            'formats': formats
        }
        response = self.client.post(url, request_data)
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(['invalid_extents'], response.data['id'])
class TestBBoxSearch(APITestCase):
    """Test cases for testing bounding box searches.

    FIX: deprecated ``assertEquals`` aliases (removed in Python 3.12)
    replaced with ``assertEqual``; unused ``task_runner`` binding dropped;
    loop variables no longer shadow the ``format`` builtin.
    """

    @patch('api.views.ExportTaskRunner')
    def setUp(self, mock):
        """Create eight jobs at known extents and force a small page size."""
        url = reverse('api:jobs-list')
        # create dummy user
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create_user(
            username='demo', email='demo@demo.com', password='demo'
        )
        # setup token authentication
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        # pull out the formats
        formats = [fmt.slug for fmt in ExportFormat.objects.all()]
        # create test jobs
        extents = [(-3.9, 16.1, 7.0, 27.6), (36.90, 13.54, 48.52, 20.24),
                   (-71.79, -49.57, -67.14, -46.16), (-61.27, -6.49, -56.20, -2.25),
                   (-11.61, 32.07, -6.42, 36.31), (-10.66, 5.81, -2.45, 11.83),
                   (47.26, 34.58, 52.92, 39.15), (90.00, 11.28, 95.74, 17.02)]
        for extent in extents:
            request_data = {
                'name': 'TestJob',
                'description': 'Test description',
                'event': 'Test Activation',
                'xmin': extent[0],
                'ymin': extent[1],
                'xmax': extent[2],
                'ymax': extent[3],
                'formats': formats
            }
            response = self.client.post(url, request_data, format='json')
            self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        self.assertEqual(8, len(Job.objects.all()))
        # Shrink the page size so pagination is exercised with few jobs.
        LinkHeaderPagination.page_size = 2

    def test_bbox_search_success(self, ):
        """A bbox query returns a paginated subset of matching jobs."""
        url = reverse('api:jobs-list')
        extent = (-79.5, -16.16, 7.40, 52.44)
        param = 'bbox={0},{1},{2},{3}'.format(extent[0], extent[1], extent[2], extent[3])
        response = self.client.get('{0}?{1}'.format(url, param))
        self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
        self.assertEqual(2, len(response.data))  # 8 jobs in total but response is paginated

    def test_list_jobs_no_bbox(self, ):
        """Listing without a bbox is paginated with a Link header."""
        url = reverse('api:jobs-list')
        response = self.client.get(url)
        self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(response['Link'], '<http://testserver/api/jobs?page=2>; rel="next"')
        self.assertEqual(2, len(response.data))  # 8 jobs in total but response is paginated

    def test_bbox_search_missing_params(self, ):
        """An empty bbox parameter yields a missing_bbox_parameter error."""
        url = reverse('api:jobs-list')
        param = 'bbox='  # missing params
        response = self.client.get('{0}?{1}'.format(url, param))
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual('missing_bbox_parameter', response.data['id'])

    def test_bbox_missing_coord(self, ):
        """A bbox with only three coordinates yields the same error."""
        url = reverse('api:jobs-list')
        extent = (-79.5, -16.16, 7.40)  # one missing
        param = 'bbox={0},{1},{2}'.format(extent[0], extent[1], extent[2])
        response = self.client.get('{0}?{1}'.format(url, param))
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual('missing_bbox_parameter', response.data['id'])
class TestPagination(APITestCase):
    """Placeholder for pagination-specific test cases (not yet implemented)."""
    pass
class TestExportRunViewSet(APITestCase):
    """
    Test cases for ExportRunViewSet
    """

    def setUp(self, ):
        """Create an authenticated user plus one job and one export run."""
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        extents = (-3.9, 16.1, 7.0, 27.6)
        bbox = Polygon.from_bbox(extents)
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        self.job_uid = str(self.job.uid)
        self.run = ExportRun.objects.create(job=self.job, user=self.user)
        self.run_uid = str(self.run.uid)

    def test_retrieve_run(self, ):
        """GET /api/runs/<uid> returns the run created in setUp."""
        expected = '/api/runs/{0}'.format(self.run_uid)
        url = reverse('api:runs-detail', args=[self.run_uid])
        self.assertEqual(expected, url)
        response = self.client.get(url)
        self.assertIsNotNone(response)
        result = response.data
        # make sure we get the correct uid back out
        self.assertEqual(self.run_uid, result[0].get('uid'))

    def test_list_runs(self, ):
        """GET /api/runs?job_uid=<uid> lists exactly the job's runs."""
        expected = '/api/runs'
        url = reverse('api:runs-list')
        self.assertEqual(expected, url)
        query = '{0}?job_uid={1}'.format(url, self.job.uid)
        response = self.client.get(query)
        self.assertIsNotNone(response)
        result = response.data
        # only the single run created in setUp should come back
        self.assertEqual(1, len(result))
        self.assertEqual(self.run_uid, result[0].get('uid'))
class TestExportConfigViewSet(APITestCase):
    """
    Test cases for ExportConfigViewSet
    """

    def setUp(self, ):
        """Create an authenticated user and a job the configs belong to."""
        self.path = os.path.dirname(os.path.realpath(__file__))
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        self.uid = self.job.uid
        # setup token authentication
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')

    def test_create_config(self, ):
        """Uploading a transform SQL file creates a published config."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        name = 'Test Export Config'
        # context manager closes the upload handle instead of leaking it
        with open(path + '/files/Example Transform.sql', 'r') as fd:
            response = self.client.post(
                url,
                {'name': name, 'upload': File(fd), 'config_type': 'TRANSFORM', 'published': True},
                format='multipart')
        uid = response.data['uid']
        saved_config = ExportConfig.objects.get(uid=uid)
        self.assertIsNotNone(saved_config)
        self.assertEqual(name, saved_config.name)
        self.assertTrue(saved_config.published)
        self.assertEqual('example_transform.sql', saved_config.filename)
        self.assertEqual('text/plain', saved_config.content_type)
        saved_config.delete()

    def test_delete_no_permissions(self, ):
        """
        Test deletion of configuration when the user has no object permissions.
        """
        post_url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        name = 'Test Export Preset'
        with open(path + '/files/hdm_presets.xml', 'r') as fd:
            response = self.client.post(
                post_url,
                {'name': name, 'upload': File(fd), 'config_type': 'PRESET', 'published': True},
                format='multipart')
        uid = response.data['uid']
        saved_config = ExportConfig.objects.get(uid=uid)
        self.assertIsNotNone(saved_config)
        self.assertEqual(name, saved_config.name)
        self.assertTrue(saved_config.published)
        self.assertEqual('hdm_presets.xml', saved_config.filename)
        self.assertEqual('application/xml', saved_config.content_type)
        delete_url = reverse('api:configs-detail', args=[uid])
        # create another user with token
        user = User.objects.create_user(
            username='other_user', email='other_user@demo.com', password='demo'
        )
        token = Token.objects.create(user=user)
        # reset the client credentials to the new user
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        # try to delete a configuration belonging to self.user
        response = self.client.delete(delete_url)
        # the other user must be refused
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        saved_config.delete()

    def test_invalid_config_type(self, ):
        """An unknown config_type is rejected with 400."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        with open(path + '/files/Example Transform.sql', 'r') as fd:
            response = self.client.post(url, {'upload': fd, 'config_type': 'TRANSFORM-WRONG'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_invalid_preset(self, ):
        """A preset file that fails validation is rejected with 400."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        with open(path + '/files/invalid_hdm_presets.xml', 'r') as fd:
            response = self.client.post(url, {'name': 'Invalid Preset', 'upload': fd, 'config_type': 'PRESET'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_invalid_name(self, ):
        """Omitting the required name field is rejected with 400."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        with open(path + '/files/Example Transform.sql', 'r') as fd:
            response = self.client.post(url, {'upload': fd, 'config_type': 'TRANSFORM'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data['name'], ['This field is required.'])

    def test_invalid_upload(self, ):
        """An empty upload value is rejected with 400."""
        url = reverse('api:configs-list')
        response = self.client.post(url, {'upload': '', 'config_type': 'TRANSFORM-WRONG'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    @skip('Transform not implemented.')
    def test_update_config(self, ):
        """PUT replaces a config's file and name while keeping its uid."""
        url = reverse('api:configs-list')
        # create an initial config we can then update..
        path = os.path.dirname(os.path.realpath(__file__))
        name = 'Test Export Config'
        with open(path + '/files/Example Transform.sql', 'r') as fd:
            response = self.client.post(url, {'name': name, 'upload': File(fd), 'config_type': 'TRANSFORM'}, format='multipart')
        saved_uid = response.data['uid']
        saved_config = ExportConfig.objects.get(uid=saved_uid)
        # update the config
        url = reverse('api:configs-detail', args=[saved_uid])
        updated_name = 'Test Export Config Updated'
        with open(path + '/files/hdm_presets.xml', 'r') as fd:
            response = self.client.put(url, {'name': updated_name, 'upload': File(fd), 'config_type': 'PRESET'}, format='multipart')
        updated_uid = response.data['uid']
        self.assertEqual(saved_uid, updated_uid)  # check its the same uid
        updated_config = ExportConfig.objects.get(uid=updated_uid)
        self.assertIsNotNone(updated_config)
        self.assertEqual('hdm_presets.xml', updated_config.filename)
        self.assertEqual('application/xml', updated_config.content_type)
        self.assertEqual('Test Export Config Updated', updated_config.name)
        updated_config.delete()
        try:
            File(open(path + '/files/Example Transform.sql', 'r'))
        except IOError:
            pass  # expected.. old file has been deleted during update.
class TestExportTaskViewSet(APITestCase):
    """
    Test cases for ExportTaskViewSet
    """

    def setUp(self, ):
        """Create a user, a job, a run, and one finished export task."""
        self.path = os.path.dirname(os.path.realpath(__file__))
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        # setup token authentication
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        self.run = ExportRun.objects.create(job=self.job)
        self.celery_uid = str(uuid.uuid4())
        self.task = ExportTask.objects.create(run=self.run, name='Shapefile Export',
                                              celery_uid=self.celery_uid, status='SUCCESS')
        self.task_uid = str(self.task.uid)

    def test_retrieve(self, ):
        """GET /api/tasks/<uid> returns the task created in setUp."""
        expected = '/api/tasks/{0}'.format(self.task_uid)
        url = reverse('api:tasks-detail', args=[self.task_uid])
        self.assertEqual(expected, url)
        response = self.client.get(url)
        self.assertIsNotNone(response)
        self.assertEqual(200, response.status_code)
        # round-trip through JSON to normalise response.data to primitives
        data = json.loads(json.dumps(response.data))
        # make sure we get the correct uid back out
        self.assertEqual(self.task_uid, data[0].get('uid'))

    def test_list(self, ):
        """GET /api/tasks lists the single task created in setUp."""
        expected = '/api/tasks'  # fixed: the literal had a no-op .format() call
        url = reverse('api:tasks-list')
        self.assertEqual(expected, url)
        response = self.client.get(url)
        self.assertIsNotNone(response)
        self.assertEqual(200, response.status_code)
        data = json.loads(json.dumps(response.data))
        # should only be one task in the list
        self.assertEqual(1, len(data))
        # make sure we get the correct uid back out
        self.assertEqual(self.task_uid, data[0].get('uid'))
| 44.809636
| 143
| 0.596124
| 4,277
| 38,133
| 5.197101
| 0.094693
| 0.084218
| 0.059385
| 0.058575
| 0.799082
| 0.763182
| 0.741227
| 0.724987
| 0.710725
| 0.685937
| 0
| 0.02407
| 0.266777
| 38,133
| 850
| 144
| 44.862353
| 0.770923
| 0.05528
| 0
| 0.657821
| 0
| 0
| 0.190981
| 0.014494
| 0
| 0
| 0
| 0
| 0.219274
| 1
| 0.057263
| false
| 0.01257
| 0.022346
| 0
| 0.087989
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f039823adc1ecaf384a349c676b625abf54c7f1
| 4,929
|
py
|
Python
|
tests/test_validator.py
|
benoit9126/drf-recaptcha
|
959613c41585e500c42bfd74ced05e5f096282d9
|
[
"MIT"
] | null | null | null |
tests/test_validator.py
|
benoit9126/drf-recaptcha
|
959613c41585e500c42bfd74ced05e5f096282d9
|
[
"MIT"
] | null | null | null |
tests/test_validator.py
|
benoit9126/drf-recaptcha
|
959613c41585e500c42bfd74ced05e5f096282d9
|
[
"MIT"
] | null | null | null |
from unittest import mock
import pytest
from rest_framework.serializers import ValidationError
from drf_recaptcha.client import RecaptchaResponse
from drf_recaptcha.validators import ReCaptchaV2Validator, ReCaptchaV3Validator
@pytest.mark.parametrize(
    ("validator_class", "params"),
    [
        (ReCaptchaV2Validator, {}),
        (ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
    ],
)
def test_recaptcha_validator_get_response_success(validator_class, params):
    """get_response() wraps the verification result in a RecaptchaResponse."""
    subject = validator_class(secret_key="TEST_SECRET_KEY", **params)
    result = subject.get_response("test_token")
    assert isinstance(result, RecaptchaResponse)
@pytest.mark.parametrize(
    ("validator_class", "params"),
    [
        (ReCaptchaV2Validator, {}),
        (ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
    ],
)
def test_recaptcha_validator_get_response_fail(validator_class, params):
    # NOTE(review): this body is identical to the "_success" test above; it
    # never sets up a failing verification, so the failure path its name
    # implies is not exercised — confirm intent and add a failing fixture.
    validator = validator_class(secret_key="TEST_SECRET_KEY", **params)
    assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
@pytest.mark.parametrize(
    ("validator_class", "params", "response"),
    [
        (ReCaptchaV2Validator, {}, RecaptchaResponse(is_valid=True)),
        (
            ReCaptchaV3Validator,
            {"action": "test_action", "required_score": 0.4},
            RecaptchaResponse(
                is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
            ),
        ),
    ],
)
def test_recaptcha_validator_call_success(validator_class, params, response):
    """A valid mocked response lets the validator pass without raising."""
    subject = validator_class(secret_key="TEST_SECRET_KEY", **params)
    # Stub out the remote verification so only validation logic runs.
    subject.get_response = mock.Mock(return_value=response)
    try:
        subject("test_token")
    except ValidationError:
        pytest.fail("Validation is not passed")
@pytest.mark.parametrize(
    ("validator_class", "params", "response", "error"),
    [
        # V2: verification rejected outright.
        (
            ReCaptchaV2Validator,
            {},
            RecaptchaResponse(is_valid=False),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
        ),
        # V2: unexpected score/action data on a V2 response -> captcha_error.
        (
            ReCaptchaV2Validator,
            {},
            RecaptchaResponse(
                is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
            ),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
        ),
        # V3: verification rejected outright.
        (
            ReCaptchaV3Validator,
            {"action": "test_action", "required_score": 0.4},
            RecaptchaResponse(is_valid=False),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
        ),
        # V3: valid but missing the expected score/action data -> captcha_error.
        (
            ReCaptchaV3Validator,
            {"action": "test_action", "required_score": 0.4},
            RecaptchaResponse(is_valid=True),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
        ),
        # V3: score below the required threshold.
        (
            ReCaptchaV3Validator,
            {"action": "test_action", "required_score": 0.4},
            RecaptchaResponse(is_valid=True, extra_data={"score": 0.3}),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
        ),
        # V3: sufficient score but no action in the response.
        (
            ReCaptchaV3Validator,
            {"action": "test_action", "required_score": 0.4},
            RecaptchaResponse(is_valid=True, extra_data={"score": 0.5}),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
        ),
        # V3: sufficient score but the action does not match.
        (
            ReCaptchaV3Validator,
            {"action": "test_action", "required_score": 0.4},
            RecaptchaResponse(
                is_valid=True, extra_data={"score": 0.5, "action": "other_action"}
            ),
            "[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
        ),
    ],
)
def test_recaptcha_validator_call_fail(validator_class, params, response, error):
    """Each bad mocked response raises ValidationError with the exact detail."""
    validator = validator_class(secret_key="TEST_SECRET_KEY", **params)
    # Stub out the remote verification so only validation logic runs.
    validator.get_response = mock.Mock(return_value=response)
    with pytest.raises(ValidationError) as exc_info:
        validator("test_token")
    assert str(exc_info.value) == error
@pytest.mark.parametrize(
    ("validator_class", "params"),
    [
        (ReCaptchaV2Validator, {}),
        (ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
    ],
)
def test_recaptcha_validator_set_context(validator_class, params, settings):
    """Calling the validator with a serializer field captures the client IP."""
    # DRF_RECAPTCHA_TESTING short-circuits the real verification call.
    settings.DRF_RECAPTCHA_TESTING = True
    validator = validator_class(secret_key="TEST_SECRET_KEY", **params)
    # the client IP is empty until the validator runs with request context
    assert validator.recaptcha_client_ip == ""
    serializer_field = mock.Mock(
        context={"request": mock.Mock(META={"HTTP_X_FORWARDED_FOR": "4.3.2.1"})}
    )
    validator("test_token", serializer_field)
    # the IP is read from the X-Forwarded-For header of the wrapped request
    assert validator.recaptcha_client_ip == "4.3.2.1"
| 35.717391
| 107
| 0.640495
| 480
| 4,929
| 6.325
| 0.177083
| 0.06917
| 0.057971
| 0.106719
| 0.797431
| 0.724308
| 0.724308
| 0.705534
| 0.705534
| 0.705534
| 0
| 0.013914
| 0.227227
| 4,929
| 137
| 108
| 35.978102
| 0.783145
| 0
| 0
| 0.601695
| 0
| 0
| 0.26618
| 0.070197
| 0
| 0
| 0
| 0
| 0.042373
| 1
| 0.042373
| false
| 0.008475
| 0.042373
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f1931d551e3c84db6b4ec3803ff0fdadaf3d25a
| 6,123
|
py
|
Python
|
schedule.py
|
IkhwanFikri1997/Exam-Schedule-Generation
|
33b8a7faf714cd05552faa5b8ed2a3717eafbb72
|
[
"MIT"
] | null | null | null |
schedule.py
|
IkhwanFikri1997/Exam-Schedule-Generation
|
33b8a7faf714cd05552faa5b8ed2a3717eafbb72
|
[
"MIT"
] | null | null | null |
schedule.py
|
IkhwanFikri1997/Exam-Schedule-Generation
|
33b8a7faf714cd05552faa5b8ed2a3717eafbb72
|
[
"MIT"
] | null | null | null |
import xlsxwriter
import display
import string

# Tracks which worksheet cells are already occupied, keyed like 'B5'.
isfilled = {}
# Palette cycled through for per-lesson cell formats.
color = ['black', 'blue', 'brown', 'cyan', 'green', 'lime', 'magenta', 'navy', 'orange', 'pink', 'purple', 'silver', 'white', 'yellow']
workbook = xlsxwriter.Workbook('Exam_Revision_Schedule.xlsx')
worksheet = workbook.add_worksheet()
Bold = workbook.add_format({'bold': True})
# Day-of-week header row (row 1, columns B..H).
worksheet.write('B1','Monday', Bold)
worksheet.write('C1','Tuesday', Bold)
worksheet.write('D1','Wednesday', Bold)
worksheet.write('E1','Thursday', Bold)
worksheet.write('F1','Friday', Bold)
worksheet.write('G1','Saturday', Bold)
worksheet.write('H1','Sunday', Bold)
# Spreadsheet columns for Monday..Sunday, indexed 0..6.
colList = ['B','C','D','E','F','G','H']
# Grey fill for sleep hours; red fill for school hours.
shleep_format = workbook.add_format({'bg_color':'#808080','border_color':'#000080'})
school_format = workbook.add_format({'bg_color':'#FF0000','border_color':'#008000'})
# One format per lesson; the first is created here, the rest are appended
# as lessons are placed.
lesson_format = []
n = 0
lesson_format.append(workbook.add_format({'bg_color':color[n],'border_color':'#008000'}))
def MakeHourlySchedule(slst,slnd,mnst,mnnd,tdst,tdnd,wdst,wdnd,thst,thnd,frst,frnd,stst,stnd,lesson):
    """Fill the worksheet with a one-hour-resolution weekly revision schedule.

    Parameters are hour numbers: sleep start/end (slst/slnd), school
    start/end for Monday..Saturday (mnst/mnnd ... stst/stnd), and `lesson`,
    a list of revision-hour counts, one entry per subject.
    NOTE(review): indentation here is reconstructed from a whitespace-
    stripped source; confirm loop nesting against the original file.
    """
    # Mark every cell in columns A..H, rows 0..25 as free.
    for i in range (8):
        for j in range(26):
            isfilled[string.ascii_uppercase[i]+str(j)] = False
    # Hour labels down column A.
    for x in range(24):
        worksheet.write(('A' + str(x + 2)),str(x)+':00')
        isfilled['A'+ str(x+1)] = True
    # Grey out sleep hours for each day; a sleep span crossing midnight
    # (start > end) is split into a late block and an early block.
    for h in range(0,7):
        if slst > slnd:
            for l in range(0, slnd):
                worksheet.write((colList[h] + str(l + 2)),'',shleep_format)
                isfilled[colList[h] + str(l+1)] = True
            for i in range(slst, 24):
                worksheet.write((colList[h] + str(i + 2)),'',shleep_format)
                isfilled[colList[h] + str(i+1)] = True
        if slst < slnd:
            for l in range(slst, slnd):
                worksheet.write((colList[h] + str(l + 2)),'',shleep_format)
                isfilled[colList[h] + str(l+1)] = True
    # Red school blocks, Monday (B) through Saturday (G).
    for a in range(mnst, mnnd):
        worksheet.write(('B' + str(a + 2)),'',school_format)
        isfilled['B' + str(a+1)] = True
    for b in range(tdst, tdnd):
        worksheet.write(('C' + str(b + 2)),'',school_format)
        isfilled['C' + str(b+1)] = True
    for c in range(wdst, wdnd):
        worksheet.write(('D' + str(c + 2)),'',school_format)
        isfilled['D' + str(c+1)] = True
    for d in range(thst, thnd):
        worksheet.write(('E' + str(d + 2)),'',school_format)
        isfilled['E' + str(d+1)] = True
    for e in range(frst, frnd):
        worksheet.write(('F' + str(e + 2)),'',school_format)
        isfilled['F' + str(e+1)] = True
    for f in range(stst, stnd):
        worksheet.write(('G' + str(f + 2)),'',school_format)
        isfilled['G' + str(f+1)] = True
    # Place each lesson's hours into the latest free cells, scanning from
    # the right-most day backwards and from late evening upwards.
    for k in range (0,len(lesson)):
        for m in range (1, (lesson[k]+1)):
            lessondone = False
            for i in range(7,1,-1):
                for j in range(24,1,-1):
                    if (lessondone == False and isfilled[string.ascii_uppercase[i]+str(j)] == False):
                        print(m)
                        worksheet.write((string.ascii_uppercase[i]+str(j+1)),'',lesson_format[k])
                        isfilled[string.ascii_uppercase[i]+str(j)] = True
                        lessondone = True
        # Prepare the colour format for the next lesson.
        n = k+1
        lesson_format.append(workbook.add_format({'bg_color':color[n],'border_color':'#008000'}))
        print(color[n])
    workbook.close()
def MakeDetailedSchedule(slst,slnd,mnst,mnnd,tdst,tdnd,wdst,wdnd,thst,thnd,frst,frnd,stst,stnd,lesson):
    """Fill the worksheet with a half-hour-resolution weekly revision schedule.

    Same parameters as MakeHourlySchedule (hour numbers), but each hour is
    split into two rows, so hour arguments are multiplied by two when mapped
    to rows (48 slots per day instead of 24).
    NOTE(review): indentation here is reconstructed from a whitespace-
    stripped source; confirm loop nesting against the original file.
    """
    # Mark every cell in columns A..H, rows 0..49 as free.
    for i in range (8):
        for j in range(50):
            isfilled[string.ascii_uppercase[i]+str(j)] = False
    # Half-hour labels down column A: even rows on the hour, odd on the half.
    for x in range(48):
        if x % 2 == 0:
            worksheet.write(('A' + str(x + 2)),str(x//2)+':00')
            isfilled['A'+ str(x+1)] = True
        elif x % 2 == 1:
            worksheet.write(('A' + str(x + 2)),str((x-1)//2)+':30')
            isfilled['A'+ str(x+1)] = True
    # Grey out sleep slots for each day; a sleep span crossing midnight
    # (start > end) is split into a late block and an early block.
    for h in range(0,7):
        if slst > slnd:
            for l in range(0, slnd*2):
                worksheet.write((colList[h] + str(l + 2)),'',shleep_format)
                isfilled[colList[h] + str(l+1)] = True
            for i in range(slst*2, 48):
                worksheet.write((colList[h] + str(i + 2)),'',shleep_format)
                isfilled[colList[h] + str(i+1)] = True
        if slst < slnd:
            for l in range(slst*2, slnd*2):
                worksheet.write((colList[h] + str(l + 2)),'',shleep_format)
                isfilled[colList[h] + str(l+1)] = True
    # Red school blocks, Monday (B) through Saturday (G).
    for a in range(mnst*2, mnnd*2):
        worksheet.write(('B' + str(a + 2)),'',school_format)
        isfilled['B' + str(a+1)] = True
    for b in range(tdst*2, tdnd*2):
        worksheet.write(('C' + str(b + 2)),'',school_format)
        isfilled['C' + str(b+1)] = True
    for c in range(wdst*2, wdnd*2):
        worksheet.write(('D' + str(c + 2)),'',school_format)
        isfilled['D' + str(c+1)] = True
    for d in range(thst*2, thnd*2):
        worksheet.write(('E' + str(d + 2)),'',school_format)
        isfilled['E' + str(d+1)] = True
    for e in range(frst*2, frnd*2):
        worksheet.write(('F' + str(e + 2)),'',school_format)
        isfilled['F' + str(e+1)] = True
    for f in range(stst*2, stnd*2):
        worksheet.write(('G' + str(f + 2)),'',school_format)
        isfilled['G' + str(f+1)] = True
    # Place each lesson's half-hour slots into the latest free cells,
    # scanning from the right-most day backwards and from late rows upwards.
    for k in range (0,len(lesson)):
        for m in range (1, (lesson[k]+1)*2):
            lessondone = False
            for i in range(7,1,-1):
                for j in range(48,1,-1):
                    if (lessondone == False and isfilled[string.ascii_uppercase[i]+str(j)] == False):
                        print(m)
                        worksheet.write((string.ascii_uppercase[i]+str(j+1)),'',lesson_format[k])
                        isfilled[string.ascii_uppercase[i]+str(j)] = True
                        lessondone = True
        # Prepare the colour format for the next lesson.
        n = k+1
        lesson_format.append(workbook.add_format({'bg_color':color[n],'border_color':'#008000'}))
        print(color[n])
    workbook.close()
| 41.938356
| 136
| 0.529642
| 846
| 6,123
| 3.770686
| 0.13357
| 0.074608
| 0.045141
| 0.078997
| 0.796552
| 0.796552
| 0.777743
| 0.770846
| 0.748276
| 0.748276
| 0
| 0.037572
| 0.287114
| 6,123
| 146
| 137
| 41.938356
| 0.693242
| 0
| 0
| 0.582677
| 0
| 0
| 0.060378
| 0.004516
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015748
| false
| 0
| 0.023622
| 0
| 0.03937
| 0.031496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f6c6527f36e7fb0f0afdf47372f4b5a5a7899ea
| 21
|
py
|
Python
|
christian.py
|
evydibble/EngineeringDesign
|
44838c6bf807efeea6c791bd13737218b9b81b7f
|
[
"MIT"
] | null | null | null |
christian.py
|
evydibble/EngineeringDesign
|
44838c6bf807efeea6c791bd13737218b9b81b7f
|
[
"MIT"
] | null | null | null |
christian.py
|
evydibble/EngineeringDesign
|
44838c6bf807efeea6c791bd13737218b9b81b7f
|
[
"MIT"
] | 7
|
2018-12-19T01:35:44.000Z
|
2019-01-10T13:59:40.000Z
|
# Emit a simple greeting to stdout.
print("helloworld")
| 21
| 21
| 0.714286
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 21
| 1
| 21
| 21
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
48a5f97c7400fcb0479970d3e626f5c5de4ce64f
| 3,682
|
py
|
Python
|
task.py
|
technetbytes/Nested-Object-Serialization
|
86dc7812c2002010247af9f4edabaf29c78c3be9
|
[
"MIT"
] | null | null | null |
task.py
|
technetbytes/Nested-Object-Serialization
|
86dc7812c2002010247af9f4edabaf29c78c3be9
|
[
"MIT"
] | null | null | null |
task.py
|
technetbytes/Nested-Object-Serialization
|
86dc7812c2002010247af9f4edabaf29c78c3be9
|
[
"MIT"
] | null | null | null |
import json
from converter import datetime_converter
from status import Status
class Task:
    """A named task with an id, a message, and a list of status conditions.

    ``conditions`` may hold either ``Status`` instances or plain dicts with
    matching fields; ``to_json`` normalises both into serialisable dicts.
    """

    def __init__(self, task_name, task_id, message, conditions):
        self.task_name = task_name
        self.task_id = task_id
        self.conditions = conditions
        self.message = message

    def __iter__(self):
        # Lets dict(task) produce the field mapping.
        yield from {
            "task_name": self.task_name,
            "task_id": self.task_id,
            "conditions": self.conditions,
            "message": self.message
        }.items()

    def __str__(self):
        return json.dumps(self.to_json(), ensure_ascii=False, default = datetime_converter)

    def __repr__(self):
        return self.__str__()

    def toJSON(self):
        # Kept with its original (non-PEP8) name for backward compatibility;
        # serialises via __dict__, so conditions are NOT normalised here.
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def to_json(self):
        """Return a JSON-serialisable dict; each condition is converted via
        Status.to_json(), whether it arrives as a Status or as a plain dict."""
        to_return = {"task_name": self.task_name, "task_id": self.task_id,
                     "message": self.message, "conditions": self.conditions}
        statuses = []
        for status in self.conditions:
            if isinstance(status, dict):
                # Re-hydrate a plain dict into a Status before serialising.
                record = namedtuple("ObjectName", status.keys())(*status.values())
                statuses.append(Status(record.id, record.status_name,
                                       record.status_datetime, record.message).to_json())
            elif isinstance(status, Status):
                statuses.append(status.to_json())
        to_return["conditions"] = statuses
        return to_return
# import json
# from task_store.converter import datetime_converter
# class Task:
# def __init__(self, task_name, task_id, message, conditions):
# self.task_name = task_name
# self.task_id = task_id
# self.conditions = conditions
# self.message = message
# def __iter__(self):
# yield from {
# "task_name": self.task_name,
# "task_id": self.task_id,
# "conditions": self.conditions,
# "message": self.message
# }.items()
# def __str__(self):
# return json.dumps(self.to_json(), ensure_ascii=False, default = datetime_converter)
# def __repr__(self):
# return self.__str__()
# def toJSON(self):
# return json.dumps(self, default=lambda o: o.__dict__,
# sort_keys=True, indent=4)
# def to_json(self):
# to_return = {"task_name": self.task_name, "task_id": self.task_id, "message": self.message}
# statuses = []
# for status in self.conditions:
# statuses.append(status.__dict__)
# #statuses = {}
# #for key, status in self.conditions.items():
# # single_status = []
# # for status_set in status:
# # single_status.append(status_set.__dict__)
# # statuses[key] = single_status
# to_return["conditions"] = statuses
# return to_return
| 35.403846
| 129
| 0.572787
| 402
| 3,682
| 4.927861
| 0.141791
| 0.056537
| 0.04846
| 0.064614
| 0.766784
| 0.766784
| 0.750126
| 0.706714
| 0.706714
| 0.706714
| 0
| 0.000789
| 0.311787
| 3,682
| 104
| 130
| 35.403846
| 0.780979
| 0.492395
| 0
| 0
| 0
| 0
| 0.068622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.102564
| 0.076923
| 0.384615
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
48acb9b399d09e2e4fb3dde5930cb61b03c38394
| 109
|
py
|
Python
|
medseg/models/segmentation_models/__init__.py
|
cherise215/Cooperative_Training_and_Latent_Space_Data_Augmentation
|
f5a987fb4babb891a41116e934a9ce6432e0d803
|
[
"Apache-2.0"
] | 18
|
2021-06-20T11:38:08.000Z
|
2022-01-04T11:53:10.000Z
|
medseg/models/segmentation_models/__init__.py
|
cherise215/Cooperative_Training_and_Latent_Space_Data_Augmentation
|
f5a987fb4babb891a41116e934a9ce6432e0d803
|
[
"Apache-2.0"
] | 1
|
2021-10-04T07:12:27.000Z
|
2021-12-06T20:54:46.000Z
|
medseg/models/segmentation_models/__init__.py
|
cherise215/Cooperative_Training_and_Latent_Space_Data_Augmentation
|
f5a987fb4babb891a41116e934a9ce6432e0d803
|
[
"Apache-2.0"
] | 2
|
2021-09-30T18:25:48.000Z
|
2022-03-14T17:16:41.000Z
|
# Created by cc215 at 05/05/19
# Enter feature description here
# Enter scenario name here
# Enter steps here
| 27.25
| 32
| 0.770642
| 18
| 109
| 4.666667
| 0.722222
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.174312
| 109
| 4
| 33
| 27.25
| 0.833333
| 0.926606
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d2905b005d20a53cba6ef5eba12d791aa88cd1f6
| 41
|
py
|
Python
|
kivymd/effects/fadingedge/__init__.py
|
vhn0912/KivyMD
|
2f6f2b78a3c1f9ff15d18ede24fd034a8db5371d
|
[
"MIT"
] | 668
|
2018-08-31T12:38:18.000Z
|
2020-07-31T21:29:10.000Z
|
kivymd/effects/fadingedge/__init__.py
|
vhn0912/KivyMD
|
2f6f2b78a3c1f9ff15d18ede24fd034a8db5371d
|
[
"MIT"
] | 377
|
2018-10-23T15:46:29.000Z
|
2020-08-01T14:03:36.000Z
|
kivymd/effects/fadingedge/__init__.py
|
vhn0912/KivyMD
|
2f6f2b78a3c1f9ff15d18ede24fd034a8db5371d
|
[
"MIT"
] | 275
|
2018-09-04T19:27:51.000Z
|
2020-07-31T01:14:48.000Z
|
from .fadingedge import FadingEdgeEffect
| 20.5
| 40
| 0.878049
| 4
| 41
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d2b472f9e9a27f7e72edeb5c7d63b20a18ad95d6
| 43
|
py
|
Python
|
python/src/log/token_exception.py
|
laxian/shell
|
4de76b413e806d52571f1a6900fdf00e70f4f1a5
|
[
"Apache-2.0"
] | 6
|
2018-01-13T17:29:25.000Z
|
2022-03-13T15:33:52.000Z
|
python/src/log/token_exception.py
|
laxian/shell
|
4de76b413e806d52571f1a6900fdf00e70f4f1a5
|
[
"Apache-2.0"
] | null | null | null |
python/src/log/token_exception.py
|
laxian/shell
|
4de76b413e806d52571f1a6900fdf00e70f4f1a5
|
[
"Apache-2.0"
] | 3
|
2018-06-03T10:28:42.000Z
|
2021-08-09T13:32:55.000Z
|
class TokenException(Exception):
    """Exception raised for token-related errors."""
| 10.75
| 32
| 0.744186
| 4
| 43
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 4
| 33
| 10.75
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d2c8c4e60374844fb4ca8bc854f31a975955baec
| 20
|
py
|
Python
|
baler/__init__.py
|
isabella232/baler
|
db4f09dd2c7729b2df5268c87ad3b4cb43396abf
|
[
"MIT"
] | 18
|
2015-01-14T00:19:50.000Z
|
2021-10-21T22:48:08.000Z
|
baler/__init__.py
|
paypal/baler
|
db4f09dd2c7729b2df5268c87ad3b4cb43396abf
|
[
"MIT"
] | 1
|
2021-02-23T10:25:10.000Z
|
2021-02-23T10:25:10.000Z
|
baler/__init__.py
|
isabella232/baler
|
db4f09dd2c7729b2df5268c87ad3b4cb43396abf
|
[
"MIT"
] | 9
|
2015-01-12T16:54:56.000Z
|
2021-06-10T15:14:20.000Z
|
from baler import *
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8273805bd559ff19996f467fc461c865f970b301
| 332
|
py
|
Python
|
test cases/unit/91 devenv/test-devenv.py
|
andriyor/meson
|
f9bfeb2add70973113ab4a98454a5c5d7e3a26ae
|
[
"Apache-2.0"
] | null | null | null |
test cases/unit/91 devenv/test-devenv.py
|
andriyor/meson
|
f9bfeb2add70973113ab4a98454a5c5d7e3a26ae
|
[
"Apache-2.0"
] | null | null | null |
test cases/unit/91 devenv/test-devenv.py
|
andriyor/meson
|
f9bfeb2add70973113ab4a98454a5c5d7e3a26ae
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# Smoke test run inside `meson devenv`: verifies that the development
# environment exported the expected variables and made in-tree Python
# modules importable.
import os
from pathlib import Path  # NOTE(review): Path appears unused here — confirm

assert os.environ['MESON_DEVENV'] == '1'
assert os.environ['MESON_PROJECT_NAME'] == 'devenv'
# TEST_A / TEST_B are custom variables the devenv is expected to export.
assert os.environ['TEST_A'] == '1'
assert os.environ['TEST_B'] == '0+1+2+3+4'

# mymod / mymod2 are project modules, importable only when the devenv has
# put them on PYTHONPATH; one exports a value, the other a callable.
from mymod.mod import hello
assert hello == 'world'
from mymod2.mod2 import hello
assert hello() == 42
| 20.75
| 51
| 0.701807
| 54
| 332
| 4.222222
| 0.537037
| 0.140351
| 0.263158
| 0.175439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038194
| 0.13253
| 332
| 15
| 52
| 22.133333
| 0.753472
| 0.051205
| 0
| 0
| 0
| 0
| 0.203822
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
82945189fe4f47d7eceb6f8a549dad11b567833d
| 41
|
py
|
Python
|
models/__init__.py
|
killf/remove_glasses
|
e73eec238686fa539cff66905a086fb26329b253
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
killf/remove_glasses
|
e73eec238686fa539cff66905a086fb26329b253
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
killf/remove_glasses
|
e73eec238686fa539cff66905a086fb26329b253
|
[
"MIT"
] | null | null | null |
from .networks import define_D, define_G
| 20.5
| 40
| 0.829268
| 7
| 41
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 1
| 41
| 41
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
82a228099e127f10947f580b359e0f7a3160f962
| 262
|
py
|
Python
|
dlocal/utils/dates.py
|
GoPreki/DlocalSDK
|
6c3f001454fcd6c8f1b5bec6b042777f5ab99b7d
|
[
"MIT"
] | null | null | null |
dlocal/utils/dates.py
|
GoPreki/DlocalSDK
|
6c3f001454fcd6c8f1b5bec6b042777f5ab99b7d
|
[
"MIT"
] | null | null | null |
dlocal/utils/dates.py
|
GoPreki/DlocalSDK
|
6c3f001454fcd6c8f1b5bec6b042777f5ab99b7d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
def now_in_isoformat():
return datetime.utcnow().isoformat()[:-3] + 'Z'
def isoformat_to_timestamp(isoformat_date, date_format='%Y-%m-%dT%H:%M:%S.%f%z'):
return datetime.strptime(isoformat_date, date_format).timestamp()
| 26.2
| 81
| 0.729008
| 38
| 262
| 4.815789
| 0.578947
| 0.153005
| 0.185792
| 0.251366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004274
| 0.10687
| 262
| 9
| 82
| 29.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.087786
| 0.083969
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
82cb71a8ca4209fb10e248bc9376b17de3152b7e
| 6,176
|
py
|
Python
|
chapter05_CNN/5.6_alexnet.py
|
JessyLee/Jessy_Dive_into_DL_Pytorch
|
40b7921637b13507057f41485d928f3b59cc6f6a
|
[
"MIT"
] | null | null | null |
chapter05_CNN/5.6_alexnet.py
|
JessyLee/Jessy_Dive_into_DL_Pytorch
|
40b7921637b13507057f41485d928f3b59cc6f6a
|
[
"MIT"
] | null | null | null |
chapter05_CNN/5.6_alexnet.py
|
JessyLee/Jessy_Dive_into_DL_Pytorch
|
40b7921637b13507057f41485d928f3b59cc6f6a
|
[
"MIT"
] | null | null | null |
# import time
# import torch
# from torch import nn, optim
# import torchvision
#
# import sys
# sys.path.append("..")
# import d2lzh_pytorch as d2l
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#
# print(torch.__version__)
# print(torchvision.__version__)
# print(device)
#
# class AlexNet(nn.Module):
# def __init__(self):
# super(AlexNet, self).__init__()
# self.conv = nn.Sequential(
# nn.Conv2d(1, 96, 11, 4),
# nn.ReLU(),
# nn.MaxPool2d(3, 2),
#
# nn.Conv2d(96, 256, 5, 1, 2),
# nn.ReLU(),
# nn.MaxPool2d(3, 2),
#
# nn.Conv2d(256, 384, 3, 1, 1),
# nn.ReLU(),
#
# nn.Conv2d(384, 384, 3, 1, 1),
# nn.ReLU(),
#
# nn.Conv2d(384, 256, 3, 1, 1),
# nn.ReLU(),
# nn.MaxPool2d(3, 2))
#
# self.fc = nn.Sequential(
# nn.Linear(256*5*5, 4096),
# nn.ReLU(),
# nn.Dropout(0.5),
#
# nn.Linear(4096, 4096),
# nn.ReLU(),
# nn.Dropout(0.5),
#
# nn.Linear(4096, 10))
#
# def forward(self, img):
# feature = self.conv(img)
# output = self.fc(feature.view(img.shape[0], -1))
# return output
#
# def load_data_fashion_mnist(batch_size, resize=None,
# root='~/Datasets/FashionMNIST'):
# trans = []
# if resize:
# trans.append(torchvision.transforms.Resize(
# size=resize))
# trans.append(torchvision.transforms.ToTensor)
#
# transform = torchvision.transforms.Compose(trans)
# mnist_train = torchvision.datasets.FashionMNIST(
# root=root, train=True, download=True, transform=transform)
# mnist_test = torchvision.datasets.FashionMNIST(
# root=root, train=False, download=True, transform=transform)
# torch.util
# train_iter = torch.utils.data.DataLoader(
# mnist_train, batch_size=batch_size, shuffle=True, num_workers=1)
# test_iter = torch.utils.data.DataLoader(
# mnist_test, batch_size=batch_size, shuffle=True, num_workers=1)
#
# return train_iter, test_iter
#
#
#
# net = AlexNet()
# batch_size = 128
# train_iter, test_iter = load_data_fashion_mnist(batch_size=batch_size, resize=224)
#
# lr, num_epochs = 0.001, 5
# optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
import time
import torch
from torch import nn, optim
import torchvision
import sys
sys.path.append("..")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(torch.__version__)
print(torchvision.__version__)
print(device)
class AlexNet(nn.Module):
def __init__(self):
super(AlexNet, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(1, 96, 11, 4), # in_channels, out_channels, kernel_size, stride, padding
nn.ReLU(),
nn.MaxPool2d(3, 2), # kernel_size, stride
# 减小卷积窗口,使用填充为2来使得输入与输出的高和宽一致,且增大输出通道数
nn.Conv2d(96, 256, 5, 1, 2),
nn.ReLU(),
nn.MaxPool2d(3, 2),
# 连续3个卷积层,且使用更小的卷积窗口。除了最后的卷积层外,进一步增大了输出通道数。
# 前两个卷积层后不使用池化层来减小输入的高和宽
nn.Conv2d(256, 384, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(384, 384, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(384, 256, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(3, 2)
)
# 这里全连接层的输出个数比LeNet中的大数倍。使用丢弃层来缓解过拟合
self.fc = nn.Sequential(
nn.Linear(256*5*5, 4096),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Dropout(0.5),
# 输出层。由于这里使用Fashion-MNIST,所以用类别数为10,而非论文中的1000
nn.Linear(4096, 10),
)
def forward(self, img):
feature = self.conv(img)
output = self.fc(feature.view(img.shape[0], -1))
return output
# 本函数已保存在d2lzh_pytorch包中方便以后使用
def load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):
"""Download the fashion mnist dataset and then load into memory."""
trans = []
if resize:
trans.append(torchvision.transforms.Resize(size=resize))
trans.append(torchvision.transforms.ToTensor())
transform = torchvision.transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=4)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=4)
return train_iter, test_iter
def train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
net = net.to(device)
print("training on ", device)
loss = torch.nn.CrossEntropyLoss()
batch_count = 0
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
for X, y in train_iter:
X = X.to(device)
y = y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
optimizer.zero_grad()
l.backward()
optimizer.step()
train_l_sum += l.cpu().item()
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
n += y.shape[0]
batch_count += 1
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
% (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
net = AlexNet()
print(net)
batch_size = 128
# 如出现“out of memory”的报错信息,可减小batch_size或resize
train_iter, test_iter = load_data_fashion_mnist(batch_size, resize=224)
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
| 32.505263
| 110
| 0.603627
| 791
| 6,176
| 4.543616
| 0.201011
| 0.045075
| 0.031163
| 0.033111
| 0.753478
| 0.740679
| 0.735392
| 0.735392
| 0.730106
| 0.725654
| 0
| 0.048743
| 0.259229
| 6,176
| 190
| 111
| 32.505263
| 0.736831
| 0.440576
| 0
| 0.135802
| 0
| 0.012346
| 0.03244
| 0.006845
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.061728
| 0
| 0.148148
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
82ceb2ebc1a8bc1ef05e2d65e7880f7791ff9d53
| 115
|
py
|
Python
|
reverseCoding/is-42-the-answer-of-everything/gen.py
|
anirudhkannanvp/HACKERRANK-REVERSE-CODING-
|
1d454d7d403ab68a667c34dba9158ebb72bdda4c
|
[
"MIT"
] | 1
|
2018-09-21T16:13:27.000Z
|
2018-09-21T16:13:27.000Z
|
reverseCoding/is-42-the-answer-of-everything/gen.py
|
anirudhkannanvp/HACKERRANK-REVERSE-CODING-
|
1d454d7d403ab68a667c34dba9158ebb72bdda4c
|
[
"MIT"
] | null | null | null |
reverseCoding/is-42-the-answer-of-everything/gen.py
|
anirudhkannanvp/HACKERRANK-REVERSE-CODING-
|
1d454d7d403ab68a667c34dba9158ebb72bdda4c
|
[
"MIT"
] | null | null | null |
from random import randint
t=randint(1,10000)
print(t)
while(t):
t-=1
print(randint(1,100000),randint(1,100000))
| 19.166667
| 43
| 0.730435
| 21
| 115
| 4
| 0.47619
| 0.285714
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201923
| 0.095652
| 115
| 6
| 43
| 19.166667
| 0.605769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
82de53e37b6f9654ff9708e3b237cdfbad2bc9dd
| 122
|
py
|
Python
|
src/blip_sdk/extensions/chat/__init__.py
|
mirlarof/blip-sdk-python
|
f958149b2524d4340eeafad8739a33db71df45ed
|
[
"MIT"
] | 2
|
2021-07-02T20:10:48.000Z
|
2021-07-13T20:51:18.000Z
|
src/blip_sdk/extensions/chat/__init__.py
|
mirlarof/blip-sdk-python
|
f958149b2524d4340eeafad8739a33db71df45ed
|
[
"MIT"
] | 9
|
2021-05-27T21:08:23.000Z
|
2021-06-14T20:10:10.000Z
|
src/blip_sdk/extensions/chat/__init__.py
|
mirlarof/blip-sdk-python
|
f958149b2524d4340eeafad8739a33db71df45ed
|
[
"MIT"
] | 3
|
2021-06-23T19:53:20.000Z
|
2022-01-04T17:50:44.000Z
|
from .content_types import ContentTypes
from .uri_templates import UriTemplates
from .chat_extension import ChatExtension
| 30.5
| 41
| 0.877049
| 15
| 122
| 6.933333
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 122
| 3
| 42
| 40.666667
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7d75729b46d86c9b3db59c5f1c54cc9ff0f4c444
| 119
|
py
|
Python
|
main.py
|
CrutoiAlexandru/Job_scrapper
|
d170188b71ca04d3515bbcbec109168c5edb951c
|
[
"MIT"
] | null | null | null |
main.py
|
CrutoiAlexandru/Job_scrapper
|
d170188b71ca04d3515bbcbec109168c5edb951c
|
[
"MIT"
] | null | null | null |
main.py
|
CrutoiAlexandru/Job_scrapper
|
d170188b71ca04d3515bbcbec109168c5edb951c
|
[
"MIT"
] | null | null | null |
import user.input as ui
if __name__ == '__main__':
# start the user input on the terminal based app
ui.start()
| 23.8
| 52
| 0.689076
| 19
| 119
| 3.894737
| 0.736842
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.226891
| 119
| 5
| 53
| 23.8
| 0.804348
| 0.386555
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7d897bc5f402b4abc2ce30552a2e6f587cb52e42
| 16
|
py
|
Python
|
Week 1/assignment/assignment1.py
|
rasulbi528/Python-web-data-acess
|
2cd1c3d3772a7f9630c837bd7bcb31598c13d10c
|
[
"MIT"
] | 1
|
2016-12-26T13:07:09.000Z
|
2016-12-26T13:07:09.000Z
|
Week 1/assignment/assignment1.py
|
rasulbi528/Python-web-data-acess
|
2cd1c3d3772a7f9630c837bd7bcb31598c13d10c
|
[
"MIT"
] | null | null | null |
Week 1/assignment/assignment1.py
|
rasulbi528/Python-web-data-acess
|
2cd1c3d3772a7f9630c837bd7bcb31598c13d10c
|
[
"MIT"
] | null | null | null |
print "Hi!! :)"
| 8
| 15
| 0.4375
| 2
| 16
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 16
| 1
| 16
| 16
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7ddd3e58986132ebb9020f296e5a5d3e856e0754
| 513
|
py
|
Python
|
calculator/src/lexer_test.py
|
cestella/software_engineering_curriculum
|
5f4ac0f2ffe868abdea3c1baf29b66c091345a02
|
[
"Apache-2.0"
] | 1
|
2020-08-19T19:43:45.000Z
|
2020-08-19T19:43:45.000Z
|
calculator/src/lexer_test.py
|
cestella/software_engineering_curriculum
|
5f4ac0f2ffe868abdea3c1baf29b66c091345a02
|
[
"Apache-2.0"
] | 3
|
2020-07-24T12:36:59.000Z
|
2021-05-28T18:01:36.000Z
|
calculator/src/lexer_test.py
|
cestella/software_engineering_curriculum
|
5f4ac0f2ffe868abdea3c1baf29b66c091345a02
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:54:18.000Z
|
2020-05-31T14:54:18.000Z
|
from src.lexer import lex
def test_basecase():
assert lex("1 + 1") == [1, "+", 1]
def test_negative_weirdness():
assert lex("1 - -1") == [1, "-", -1]
def test_no_space():
assert lex("-1.2*2") == [-1.2, "*", 2]
def test_multiple_spaces():
assert lex("1 + 1") == [1, "+", 1]
def test_parens():
assert lex("(2 + 3) - 3.2") == ["(", 2, "+", 3, ")", "-", 3.2]
def test_RPN():
assert lex("1 1 +") == [1, 1, "+"]
def test_RPN_with_spaces():
assert lex("1 1 +") == [1, 1, "+"]
| 17.1
| 66
| 0.481481
| 79
| 513
| 2.974684
| 0.265823
| 0.12766
| 0.12766
| 0.234043
| 0.446809
| 0.446809
| 0.446809
| 0.340426
| 0
| 0
| 0
| 0.086957
| 0.237817
| 513
| 29
| 67
| 17.689655
| 0.514067
| 0
| 0
| 0.266667
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0.466667
| 1
| 0.466667
| true
| 0
| 0.066667
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
8149d6d5371e10ee5d8956061019f0b7810c1ae5
| 130
|
py
|
Python
|
calculatorapp/admin.py
|
arpit456jain/CalculatorApp-In-Django
|
aa976117952db03128e4fe6d6d0dbf375ab29b5d
|
[
"MIT"
] | null | null | null |
calculatorapp/admin.py
|
arpit456jain/CalculatorApp-In-Django
|
aa976117952db03128e4fe6d6d0dbf375ab29b5d
|
[
"MIT"
] | null | null | null |
calculatorapp/admin.py
|
arpit456jain/CalculatorApp-In-Django
|
aa976117952db03128e4fe6d6d0dbf375ab29b5d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import UserFeedback
admin.site.register(UserFeedback)
| 21.666667
| 33
| 0.823077
| 17
| 130
| 6.294118
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 130
| 6
| 33
| 21.666667
| 0.930435
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
816b0c81fba1986ec985dd3cb56e5f9b5969bc22
| 444
|
py
|
Python
|
command.py
|
ichiyonnana/nnutil
|
bb6ae2fe47da47f15fa256aa85602e7c3beb1f80
|
[
"MIT"
] | 1
|
2021-08-08T22:18:04.000Z
|
2021-08-08T22:18:04.000Z
|
nnutil/command.py
|
ichiyonnana/nntools_maya
|
6a0aa39194cac00aac35e9eca6fcf2b12a70f373
|
[
"MIT"
] | null | null | null |
nnutil/command.py
|
ichiyonnana/nntools_maya
|
6a0aa39194cac00aac35e9eca6fcf2b12a70f373
|
[
"MIT"
] | null | null | null |
#! python
# coding:utf-8
import maya.cmds as cmds
import maya.mel as mel
import pymel.core as pm
def get_selection(**kwargs):
""" [cmds]
Returns:
[type]: [description]
"""
return cmds.ls(selection=True, flatten=True, **kwargs)
def selected(**kwargs):
""" [pm] flatten を有効にした pm.selected()
Returns:
[type]: [description]
"""
return pm.selected(flatten=True, **kwargs)
| 18.5
| 59
| 0.585586
| 52
| 444
| 4.980769
| 0.480769
| 0.07722
| 0.169884
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003077
| 0.268018
| 444
| 24
| 60
| 18.5
| 0.793846
| 0.301802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.428571
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
81e3c8e32867953c69e71802ef9948b2886604a1
| 120
|
py
|
Python
|
uci_cbp_demo/ui/__init__.py
|
taoyilee/bp_demo
|
eaaba09857a8a597f3691b6b79902b71e4af1ffe
|
[
"SWL"
] | 1
|
2020-02-20T21:39:28.000Z
|
2020-02-20T21:39:28.000Z
|
uci_cbp_demo/ui/__init__.py
|
taoyilee/bp_demo
|
eaaba09857a8a597f3691b6b79902b71e4af1ffe
|
[
"SWL"
] | 19
|
2020-04-08T00:10:06.000Z
|
2020-06-21T04:39:48.000Z
|
uci_cbp_demo/ui/__init__.py
|
taoyilee/bp_demo
|
eaaba09857a8a597f3691b6b79902b71e4af1ffe
|
[
"SWL"
] | null | null | null |
# MIT License
# Copyright (C) Michael Tao-Yi Lee (taoyil AT UCI EDU)
from .gui import main
from .tui import tui_main
| 20
| 55
| 0.725
| 21
| 120
| 4.095238
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 120
| 5
| 56
| 24
| 0.895833
| 0.541667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c4b41c63d6c0d9ceb82bae821585ed2753e8c1e0
| 208
|
py
|
Python
|
src/loralay/data/utils.py
|
laudao/loralay-modeling
|
a7c89717bac4f0ef9ed820544c4d27e2fe2e4228
|
[
"Apache-2.0"
] | null | null | null |
src/loralay/data/utils.py
|
laudao/loralay-modeling
|
a7c89717bac4f0ef9ed820544c4d27e2fe2e4228
|
[
"Apache-2.0"
] | null | null | null |
src/loralay/data/utils.py
|
laudao/loralay-modeling
|
a7c89717bac4f0ef9ed820544c4d27e2fe2e4228
|
[
"Apache-2.0"
] | null | null | null |
def normalize_bbox(bbox, size):
return [
int(1000 * bbox[0] / size[0]),
int(1000 * bbox[1] / size[1]),
int(1000 * bbox[2] / size[0]),
int(1000 * bbox[3] / size[1]),
]
| 23.111111
| 38
| 0.475962
| 30
| 208
| 3.266667
| 0.366667
| 0.285714
| 0.44898
| 0.244898
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 0.322115
| 208
| 8
| 39
| 26
| 0.524823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
c4e46ee6de43bd254d0a5f54f2ef8afea92aaaf4
| 7,372
|
py
|
Python
|
programs/aniso_magic.py
|
schwehr/PmagPy
|
5e9edc5dc9a7a243b8e7f237fa156e0cd782076b
|
[
"BSD-3-Clause"
] | 2
|
2020-07-05T01:11:33.000Z
|
2020-07-05T01:11:39.000Z
|
programs/aniso_magic.py
|
schwehr/PmagPy
|
5e9edc5dc9a7a243b8e7f237fa156e0cd782076b
|
[
"BSD-3-Clause"
] | null | null | null |
programs/aniso_magic.py
|
schwehr/PmagPy
|
5e9edc5dc9a7a243b8e7f237fa156e0cd782076b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- python-indent-offset: 4; -*-
#pylint: disable=invalid-name,wrong-import-position,line-too-long
#import draw
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
from pmagpy import ipmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.contribution_builder as cb
def old():
"""
NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-usr USER: set the user name
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
-gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
iboot, vec = 1, 0
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
plots, verbose = 0, True
if '-sav' in args:
plots = 1
verbose = 0
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic_old(infile=infile, samp_file=samp_file, site_file=site_file,
ipar=ipar, ihext=ihext, ivec=ivec, iplot=iplot, isite=isite, iboot=iboot, vec=vec,
Dir=Dir, PDir=PDir, comp=comp, user=user,
fmt=fmt, crd=crd, verbose=verbose, plots=plots,
num_bootstraps=num_bootstraps, dir_path=dir_path,
input_dir_path=input_dir_path)
def main():
"""
NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
-gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if '-h' in args:
print(new.__doc__)
return
dir_path = pmag.get_named_arg("-WD", ".")
if '-ID' in args and dir_path == '.':
dir_path = pmag.get_named_arg("-ID", ".")
iboot, vec = 1, 0
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
if ivec:
vec = 3
#iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
save_plots, verbose, interactive = False, True, True
if '-sav' in args:
save_plots = True
verbose = False
interactive = False
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic_nb(infile, samp_file, site_file, verbose,
ipar, ihext, ivec, isite, False, iboot,
vec, Dir, PDir, crd, num_bootstraps,
dir_path, save_plots=save_plots, interactive=interactive,
fmt=fmt)
if __name__ == "__main__":
if "-old" in sys.argv:
old()
else:
main()
| 38.196891
| 104
| 0.616115
| 1,069
| 7,372
| 4.117867
| 0.204864
| 0.047706
| 0.054521
| 0.068151
| 0.794184
| 0.780327
| 0.780327
| 0.758519
| 0.758519
| 0.758519
| 0
| 0.017596
| 0.267634
| 7,372
| 192
| 105
| 38.395833
| 0.79774
| 0.426072
| 0
| 0.54902
| 0
| 0
| 0.057975
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.058824
| 0
| 0.088235
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1ee644ac8ac8c64862ca2f6c7c456ea0a2a57bf2
| 47
|
py
|
Python
|
neelu/__init__.py
|
NeeluGeorge/testpks
|
4b676a70e88490cdcdf5ed9f6b92a47b28eedc2d
|
[
"MIT"
] | null | null | null |
neelu/__init__.py
|
NeeluGeorge/testpks
|
4b676a70e88490cdcdf5ed9f6b92a47b28eedc2d
|
[
"MIT"
] | null | null | null |
neelu/__init__.py
|
NeeluGeorge/testpks
|
4b676a70e88490cdcdf5ed9f6b92a47b28eedc2d
|
[
"MIT"
] | null | null | null |
from .neelu import printhi
__all__=["printhi"]
| 15.666667
| 26
| 0.765957
| 6
| 47
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 3
| 27
| 15.666667
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
1eefa53d1317567978c4addda55a56119ba40eda
| 278
|
py
|
Python
|
main_game/creditos.py
|
AlefAdonis/playgame
|
3f1049ad3682f169f2435faeeef63b0b1ddf5642
|
[
"MIT"
] | null | null | null |
main_game/creditos.py
|
AlefAdonis/playgame
|
3f1049ad3682f169f2435faeeef63b0b1ddf5642
|
[
"MIT"
] | 2
|
2020-09-21T14:32:38.000Z
|
2020-10-07T17:47:35.000Z
|
main_game/creditos.py
|
AlefAdonis/playgame
|
3f1049ad3682f169f2435faeeef63b0b1ddf5642
|
[
"MIT"
] | 2
|
2020-10-07T19:53:57.000Z
|
2020-10-10T21:23:43.000Z
|
def credits():
print('\nObrigado por jogar !\n')
print('-' * 20)
print('CRÉDITOS FINAIS \nDiretor Geral - Álef Ádonis\nProgramador - Álef Ádonis\nRoteirista - Álef Ádonis\nEditor Chefe - Álef Ádonis\n\nUM PEQUENO GESTO PARA DEMOSTRAR MEU AMOR')
print('-' *20)
| 39.714286
| 183
| 0.683453
| 36
| 278
| 5.277778
| 0.694444
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017699
| 0.18705
| 278
| 6
| 184
| 46.333333
| 0.823009
| 0
| 0
| 0.4
| 0
| 0.2
| 0.705036
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0
| 0
| 0.2
| 0.8
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
4808ae8e24928d021e6ff8a71a890563131b8265
| 176
|
py
|
Python
|
bin/cubes/solid-pentominoes-3x3x9-ring.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/cubes/solid-pentominoes-3x3x9-ring.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/cubes/solid-pentominoes-3x3x9-ring.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""3 solutions"""
import puzzler
from puzzler.puzzles.solid_pentominoes import SolidPentominoes3x3x9Ring
puzzler.run(SolidPentominoes3x3x9Ring)
| 17.6
| 71
| 0.801136
| 19
| 176
| 7.368421
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04375
| 0.090909
| 176
| 9
| 72
| 19.555556
| 0.83125
| 0.210227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4813dc6e409e55e64fcb43edf90f0bc58640f97f
| 196
|
py
|
Python
|
app/__init__.py
|
lon9/Twitter-bot-maker
|
f1985dac43f5ceb8ea20983a8e418c4275f8b782
|
[
"MIT"
] | 1
|
2019-10-12T23:28:02.000Z
|
2019-10-12T23:28:02.000Z
|
app/__init__.py
|
Rompei/Twitter-bot-maker
|
f1985dac43f5ceb8ea20983a8e418c4275f8b782
|
[
"MIT"
] | 1
|
2019-12-26T16:37:03.000Z
|
2019-12-26T16:37:03.000Z
|
app/__init__.py
|
lon9/Twitter-bot-maker
|
f1985dac43f5ceb8ea20983a8e418c4275f8b782
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
__version = '1.0'
from bottle import Bottle, TEMPLATE_PATH
app = Bottle()
TEMPLATE_PATH.append("./app/views")
TEMPLATE_PATH.remove("./views/")
from app.controllers import *
| 24.5
| 40
| 0.714286
| 27
| 196
| 5
| 0.592593
| 0.266667
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017143
| 0.107143
| 196
| 7
| 41
| 28
| 0.754286
| 0.102041
| 0
| 0
| 0
| 0
| 0.126437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
48862e16641b7648ac63c45ebe3f4f1e94b1c815
| 1,713
|
py
|
Python
|
hapthexa_main/launch/launch.py
|
tmcit-caolab/hapthexa_ros2
|
74030283bba2c0c3b4de6651320c8b8a243d723c
|
[
"MIT"
] | null | null | null |
hapthexa_main/launch/launch.py
|
tmcit-caolab/hapthexa_ros2
|
74030283bba2c0c3b4de6651320c8b8a243d723c
|
[
"MIT"
] | null | null | null |
hapthexa_main/launch/launch.py
|
tmcit-caolab/hapthexa_ros2
|
74030283bba2c0c3b4de6651320c8b8a243d723c
|
[
"MIT"
] | 1
|
2021-11-01T08:14:32.000Z
|
2021-11-01T08:14:32.000Z
|
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
    """Build the HaptHexa launch description.

    Launches one ``hapthexa_leg`` node per leg — six legs, named and
    namespaced by their position on the body — plus the attitude
    controller node.  Legs mounted at a rotated orientation receive a
    ``leg_install_angle`` parameter (radians).

    Returns:
        LaunchDescription: all nodes to launch.
    """
    def _leg(position, install_angle=None):
        # One hapthexa_leg node; `position` becomes both the node-name
        # suffix and the namespace leaf.  When `install_angle` is None the
        # `parameters` kwarg is omitted entirely, matching the original
        # per-node configuration exactly.
        kwargs = {}
        if install_angle is not None:
            kwargs["parameters"] = [{'leg_install_angle': install_angle}]
        return Node(
            package="hapthexa_main",
            executable="hapthexa_leg",
            name="hapthexa_leg_" + position,
            namespace="hapthexa/leg/" + position,
            **kwargs
        )

    # Angles are kept as the original hard-coded literals (~ +/-pi/2, +/-pi)
    # so the launched configuration is unchanged from the previous version.
    return LaunchDescription([
        _leg("front_left"),
        _leg("middle_left", 1.57079632679),
        _leg("rear_left", 3.14159265359),
        _leg("front_right"),
        _leg("middle_right", -1.57079632679),
        _leg("rear_right", -3.14159265359),
        Node(
            package="hapthexa_main",
            executable="attitude_controller.py",
            name="attitude_controller"
        )
    ])
| 33.588235
| 62
| 0.569177
| 153
| 1,713
| 6.045752
| 0.215686
| 0.214054
| 0.143784
| 0.174054
| 0.63027
| 0.610811
| 0.610811
| 0.610811
| 0.610811
| 0.610811
| 0
| 0.041415
| 0.323409
| 1,713
| 51
| 63
| 33.588235
| 0.756687
| 0
| 0
| 0.52
| 1
| 0
| 0.323221
| 0.177363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| true
| 0
| 0.04
| 0.02
| 0.08
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6f7c0d29b94090986352ac8683b8b0b8b68e85e7
| 84
|
py
|
Python
|
IA/Python/5/5.1/6.py
|
worthl3ss/random-small
|
ffb60781f57eb865acbd81aaa07056046bad32fe
|
[
"MIT"
] | 1
|
2022-02-23T12:47:00.000Z
|
2022-02-23T12:47:00.000Z
|
IA/Python/5/5.1/6.py
|
worthl3ss/random-small
|
ffb60781f57eb865acbd81aaa07056046bad32fe
|
[
"MIT"
] | null | null | null |
IA/Python/5/5.1/6.py
|
worthl3ss/random-small
|
ffb60781f57eb865acbd81aaa07056046bad32fe
|
[
"MIT"
] | null | null | null |
def factorial(n):
    """Return n! for a non-negative integer n.

    Args:
        n: non-negative integer.

    Returns:
        int: the factorial of n (1 for n == 0 or n == 1).

    Raises:
        ValueError: if n is negative (the original recursed without bound
            for negative input, ending in RecursionError).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    # Iterative product avoids the recursion-depth limit the original
    # recursive implementation hits for large n.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
| 16.8
| 27
| 0.547619
| 16
| 84
| 2.875
| 0.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.309524
| 84
| 4
| 28
| 21
| 0.724138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6fa9d8cffa9de939322d66001851e7534ba08d5b
| 46
|
py
|
Python
|
src/signal_backtester/base/__init__.py
|
xibalbas/signal_backtester
|
8eaa52ecad22419b29b0e0e34eaadfea83f4e4b9
|
[
"MIT"
] | 14
|
2022-03-04T20:23:45.000Z
|
2022-03-30T11:04:40.000Z
|
src/signal_backtester/base/__init__.py
|
xibalbas/signal_backtester
|
8eaa52ecad22419b29b0e0e34eaadfea83f4e4b9
|
[
"MIT"
] | null | null | null |
src/signal_backtester/base/__init__.py
|
xibalbas/signal_backtester
|
8eaa52ecad22419b29b0e0e34eaadfea83f4e4b9
|
[
"MIT"
] | 2
|
2022-03-05T10:18:19.000Z
|
2022-03-06T12:51:49.000Z
|
"""Base
Base Package
more description
"""
| 9.2
| 20
| 0.652174
| 5
| 46
| 6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 46
| 4
| 21
| 11.5
| 0.833333
| 0.826087
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6fc786bf49e570d05ac98894f60c30b6ee77872d
| 1,285
|
py
|
Python
|
mollie/api/objects/invoice.py
|
bryanwills/mollie-api-python
|
8122569ae83f07ad07893f3dd56e9a60bcccae05
|
[
"BSD-2-Clause"
] | null | null | null |
mollie/api/objects/invoice.py
|
bryanwills/mollie-api-python
|
8122569ae83f07ad07893f3dd56e9a60bcccae05
|
[
"BSD-2-Clause"
] | null | null | null |
mollie/api/objects/invoice.py
|
bryanwills/mollie-api-python
|
8122569ae83f07ad07893f3dd56e9a60bcccae05
|
[
"BSD-2-Clause"
] | null | null | null |
from .base import ObjectBase
class Invoice(ObjectBase):
    """Read-only wrapper around a Mollie invoice API object.

    Each property maps a snake_case attribute to the camelCase key of the
    underlying API payload via ``_get_property`` (inherited from
    ``ObjectBase``).
    """

    @classmethod
    def get_resource_class(cls, client):
        """Return the Invoices resource handler bound to *client*."""
        # Imported lazily — presumably to avoid a circular import between
        # the objects and resources packages; confirm before hoisting.
        from ..resources.invoices import Invoices
        return Invoices(client)
    @property
    def id(self):
        return self._get_property("id")
    @property
    def reference(self):
        return self._get_property("reference")
    @property
    def vat_number(self):
        return self._get_property("vatNumber")
    @property
    def status(self):
        return self._get_property("status")
    @property
    def issued_at(self):
        return self._get_property("issuedAt")
    @property
    def paid_at(self):
        return self._get_property("paidAt")
    @property
    def due_at(self):
        return self._get_property("dueAt")
    @property
    def resource(self):
        return self._get_property("resource")
    @property
    def net_amount(self):
        return self._get_property("netAmount")
    @property
    def vat_amount(self):
        return self._get_property("vatAmount")
    @property
    def gross_amount(self):
        return self._get_property("grossAmount")
    @property
    def lines(self):
        # Unlike the other accessors, defaults to an empty list when the
        # payload carries no line items.
        return self._get_property("lines") or []
    @property
    def pdf(self):
        # Link (not a payload property): resolved through _get_link.
        return self._get_link("pdf")
| 20.725806
| 49
| 0.638911
| 147
| 1,285
| 5.346939
| 0.272109
| 0.181934
| 0.231552
| 0.28117
| 0.412214
| 0.221374
| 0
| 0
| 0
| 0
| 0
| 0
| 0.257588
| 1,285
| 61
| 50
| 21.065574
| 0.823899
| 0
| 0
| 0.288889
| 0
| 0
| 0.070039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.311111
| false
| 0
| 0.044444
| 0.288889
| 0.688889
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.