hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aabf42721a2ad015b96fc7311954534f13a041d8
| 2,395
|
py
|
Python
|
etl/parsers/etw/Microsoft_User_Experience_Virtualization_SQM_Uploader.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 104
|
2020-03-04T14:31:31.000Z
|
2022-03-28T02:59:36.000Z
|
etl/parsers/etw/Microsoft_User_Experience_Virtualization_SQM_Uploader.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 7
|
2020-04-20T09:18:39.000Z
|
2022-03-19T17:06:19.000Z
|
etl/parsers/etw/Microsoft_User_Experience_Virtualization_SQM_Uploader.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 16
|
2020-03-05T18:55:59.000Z
|
2022-03-01T10:19:28.000Z
|
# -*- coding: utf-8 -*-
"""
Microsoft-User Experience Virtualization-SQM Uploader
GUID : 57003e21-269b-4bdc-8434-b3bf8d57d2d5
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=3, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_3_0(Etw):
pattern = Struct(
"hresult" / Int32ul
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=4, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_4_0(Etw):
pattern = Struct(
"WString1" / WString
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=6, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_6_0(Etw):
pattern = Struct(
"hr" / Int32ul,
"filename" / WString,
"http" / Int32sl
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=7, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_7_0(Etw):
pattern = Struct(
"hr" / Int32ul,
"filename" / WString,
"http" / Int32sl
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=8, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_8_0(Etw):
pattern = Struct(
"error" / Int32ul
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=10, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_10_0(Etw):
pattern = Struct(
"String1" / CString
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=12, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_12_0(Etw):
pattern = Struct(
"uint32" / Int32ul
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=13, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_13_0(Etw):
pattern = Struct(
"WString1" / WString
)
@declare(guid=guid("57003e21-269b-4bdc-8434-b3bf8d57d2d5"), event_id=14, version=0)
class Microsoft_User_Experience_Virtualization_SQM_Uploader_14_0(Etw):
pattern = Struct(
"error" / Int32ul
)
| 30.705128
| 123
| 0.730689
| 303
| 2,395
| 5.537954
| 0.207921
| 0.077473
| 0.137068
| 0.220501
| 0.809893
| 0.809893
| 0.72944
| 0.72944
| 0.72944
| 0.402265
| 0
| 0.143559
| 0.150731
| 2,395
| 77
| 124
| 31.103896
| 0.681416
| 0.050104
| 0
| 0.358491
| 0
| 0
| 0.17564
| 0.142983
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.075472
| 0
| 0.415094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aad54e229a30060595d79a80e2c984060081bcd4
| 37
|
py
|
Python
|
pywaterml/__main__.py
|
BYU-Hydroinformatics/pywaterml
|
d4c88a0402dec61d466edb1fa5dbda4544f7a738
|
[
"BSD-3-Clause"
] | 1
|
2021-11-10T18:28:10.000Z
|
2021-11-10T18:28:10.000Z
|
pywaterml/__main__.py
|
BYU-Hydroinformatics/pywaterml
|
d4c88a0402dec61d466edb1fa5dbda4544f7a738
|
[
"BSD-3-Clause"
] | null | null | null |
pywaterml/__main__.py
|
BYU-Hydroinformatics/pywaterml
|
d4c88a0402dec61d466edb1fa5dbda4544f7a738
|
[
"BSD-3-Clause"
] | 1
|
2021-03-15T00:18:34.000Z
|
2021-03-15T00:18:34.000Z
|
from pywaterml import cli
cli.cli()
| 9.25
| 25
| 0.756757
| 6
| 37
| 4.666667
| 0.666667
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 3
| 26
| 12.333333
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
631b20b19ae22e194c18886805f9660aaae9332f
| 127
|
py
|
Python
|
FinMind/crawler/__init__.py
|
vishalbelsare/FinMind
|
9890c631952f3ab91560ada6f49971bff26a5858
|
[
"Apache-2.0"
] | 1,106
|
2019-10-04T15:16:59.000Z
|
2022-03-31T03:50:19.000Z
|
FinMind/crawler/__init__.py
|
Jerremiah/FinMind
|
bf57f8e68adc0583495b29135a91e47515cf4cf1
|
[
"Apache-2.0"
] | 119
|
2019-10-07T09:18:18.000Z
|
2022-03-12T08:25:58.000Z
|
FinMind/crawler/__init__.py
|
Jerremiah/FinMind
|
bf57f8e68adc0583495b29135a91e47515cf4cf1
|
[
"Apache-2.0"
] | 184
|
2019-10-06T08:26:53.000Z
|
2022-03-21T06:25:31.000Z
|
from FinMind.crawler.commodities import CommoditiesCrawler
from FinMind.crawler.government_bonds import GovernmentBondsCrawler
| 42.333333
| 67
| 0.905512
| 13
| 127
| 8.769231
| 0.692308
| 0.192982
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062992
| 127
| 2
| 68
| 63.5
| 0.957983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d696cd97b2e70e930df14d83d8e0a89bd580b5a
| 82
|
py
|
Python
|
site_scons/site_tools/test.py
|
robobrobro/physics-engine
|
0000e2155106f3c6c0485af96bc14120dc6d155a
|
[
"MIT"
] | null | null | null |
site_scons/site_tools/test.py
|
robobrobro/physics-engine
|
0000e2155106f3c6c0485af96bc14120dc6d155a
|
[
"MIT"
] | 8
|
2019-01-26T03:19:46.000Z
|
2019-04-16T14:22:53.000Z
|
site_scons/site_tools/test.py
|
robobrobro/physics-engine
|
0000e2155106f3c6c0485af96bc14120dc6d155a
|
[
"MIT"
] | null | null | null |
def exists(env):
return True
def generate(env):
env.Replace(MODE='test')
| 13.666667
| 28
| 0.658537
| 12
| 82
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 82
| 5
| 29
| 16.4
| 0.818182
| 0
| 0
| 0
| 1
| 0
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2d7a65c14c3f592b101ad9734d2735c93a8be732
| 81
|
py
|
Python
|
services/modes_microservice/database_service.py
|
tojoabella/dripi
|
df8a34116f2c7ed68170364c3d5ab5cbd46d1209
|
[
"MIT"
] | 4
|
2021-05-15T00:58:46.000Z
|
2022-02-23T09:25:07.000Z
|
services/modes_microservice/database_service.py
|
tojoabella/dripi
|
df8a34116f2c7ed68170364c3d5ab5cbd46d1209
|
[
"MIT"
] | null | null | null |
services/modes_microservice/database_service.py
|
tojoabella/dripi
|
df8a34116f2c7ed68170364c3d5ab5cbd46d1209
|
[
"MIT"
] | null | null | null |
class DatabaseService:
def __init__(self):
#TODO: mongo
pass
| 16.2
| 23
| 0.592593
| 8
| 81
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 81
| 5
| 24
| 16.2
| 0.814815
| 0.135802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
2dbf43795adb82653553a589e42ea606522130b0
| 16,333
|
py
|
Python
|
parsimony/config.py
|
nguigs/pylearn-parsimony
|
f712d2828823d6d55a2470ce060bcaeda2d0589a
|
[
"BSD-3-Clause"
] | 41
|
2015-02-27T13:26:01.000Z
|
2021-07-13T12:48:14.000Z
|
parsimony/config.py
|
nguigs/pylearn-parsimony
|
f712d2828823d6d55a2470ce060bcaeda2d0589a
|
[
"BSD-3-Clause"
] | 31
|
2015-01-12T15:02:45.000Z
|
2022-02-10T07:11:07.000Z
|
parsimony/config.py
|
nguigs/pylearn-parsimony
|
f712d2828823d6d55a2470ce060bcaeda2d0589a
|
[
"BSD-3-Clause"
] | 15
|
2015-01-12T14:48:39.000Z
|
2021-07-13T12:48:32.000Z
|
# -*- coding: utf-8 -*-
"""Handles configuration settings in pylearn-parsimony.
Try to make the sections correspond to packages (sans the parsimony prefix),
such that settings for parsimony.algorithms are found in the section
"algorithms", and that parsimony.utils.consts is found in the section
"utils.consts", etc.
Created on Wed Apr 8 21:21:20 2015
Copyright (c) 2013-2017, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: Tommy Löfstedt
@email: lofstedt.tommy@gmail.com
@license: BSD 3-clause.
"""
import os.path
import inspect
import warnings
try:
import configparser # Python 3
except ImportError:
import ConfigParser as configparser # Python 2
__all__ = ["get_option", "get_boolean", "get_float", "get_int", "set_option",
"flush"]
# TODO: Python 3 destroys the ini file sometimes on delete. Make the config
# read the file on every call? Slower, but much safer; especially when
# running multiple instances of pylearn-parsimony.
#__config__ = None
#__ini_file__ = "config.ini"
#__flush_dry_run__ = False
class __Config(object):
__flush_dry_run__ = False
def __init__(self, ini_file):
self._ini_file = self._ini_file_name(str(ini_file))
self._config = configparser.ConfigParser()
if os.path.exists(self._ini_file):
try:
self._config.read(self._ini_file)
except configparser.ParsingError:
warnings.warn("Could not parse the config file.",
RuntimeWarning)
else:
warnings.warn("Could not locate the config file.", RuntimeWarning)
def __del__(self):
# Save updates to configuration file. Cannot call flush here.
try:
if not self.__flush_dry_run__:
if os.path.exists(self._ini_file):
with open(self._ini_file, "wb") as fid:
self._config.write(fid)
else:
warnings.warn("Could not locate the config file.",
RuntimeWarning)
except Exception:
# TODO: Anything we can do to resolve this?
pass # Couldn't save. Objects used are probably deleted already.
def _ini_file_name(self, ini_file):
"""Extracts the directory of this module.
"""
fname = inspect.currentframe() # This module.
fname = inspect.getfile(fname) # Filename of this module.
fname = os.path.abspath(fname) # Absolute path of this module.
fname = os.path.dirname(fname) # Directory of this module.
if fname[-1] != "/":
fname = fname + "/" # Should be there, but just in case ...
fname = fname + ini_file # The ini file.
return fname
def get_option(self, section, option, default=None):
"""Fetches a configuration option from a section of the ini file. If
not found, returns the default value.
"""
section = str(section)
option = str(option)
if not self._config.has_section(section): # Subsumed by the below?
value = default
elif not self._config.has_option(section, option):
value = default
else:
value = self._config.get(section, option)
return str(value)
def get_boolean(self, section, option, default=False):
"""Fetches a boolean configuration option from a section of the ini
file. If not found, returns the default value False.
"""
section = str(section)
option = str(option)
if not self._config.has_section(section): # Subsumed by the below?
value = default
elif not self._config.has_option(section, option):
value = default
else:
value = self._config.getboolean(section, option)
return bool(value)
def get_float(self, section, option, default=0.0):
"""Fetches a floating point configuration option from a section of the
ini file. If not found, returns the default value 0.0.
"""
section = str(section)
option = str(option)
if not self._config.has_section(section): # Subsumed by the below?
value = default
elif not self._config.has_option(section, option):
value = default
else:
value = self._config.getfloat(section, option)
return float(value)
def get_int(self, section, option, default=0):
"""Fetches an integer configuration option from a section of the ini
file. If not found, returns the default value 0.
"""
section = str(section)
option = str(option)
if not self._config.has_section(section): # Subsumed by the below?
value = default
elif not self._config.has_option(section, option):
value = default
else:
value = self._config.getint(section, option)
return int(value)
def set_option(self, section, option, value, flush_file=False):
"""Sets a configuration option.
"""
section = str(section)
option = str(option)
value = str(value)
if not self._config.has_section(section):
self._config.add_section(section)
self._config.set(section, option, value)
if flush_file:
self.flush()
def flush(self):
"""Saves the current configuration to disk.
"""
if os.path.exists(self._ini_file):
if not self.__flush_dry_run__:
with open(self._ini_file, "wb") as fid:
self._config.write(fid)
else:
warnings.warn("Could not locate the config file.", RuntimeWarning)
__config__ = __Config("config.ini")
#def __ini_file_name__():
# """Extracts the directory of this module.
# """
# fname = inspect.currentframe() # This module.
# fname = inspect.getfile(fname) # Filename of this module.
# fname = os.path.abspath(fname) # Absolute path of this module.
# fname = os.path.dirname(fname) # Directory of this module.
# if fname[-1] != "/":
# fname = fname + "/" # Should be there, but just in case ...
# fname = fname + __ini_file__ # The ini file.
#
# return fname
#def __load_config__():
# """Loads the configuration settings from the ini file.
# """
# global __config__
# __config__ = ConfigParser.ConfigParser()
#
# fname = __ini_file_name__()
# if os.path.exists(fname):
# try:
# __config__.read(fname)
#
# return True
#
# except ConfigParser.ParsingError:
# warnings.warn("Could not parse the config file.", RuntimeWarning)
# else:
# warnings.warn("Could not locate the config file.", RuntimeWarning)
#
# return False
def get_option(section, option, default=None):
"""Fetches a configuration option from a section of the ini file. If not
found, returns the default value.
Parameters
----------
section : String. The section of the ini file to read from. Try to make the
sections correspond to packages (sans the parsimony prefix), such
that settings for parsimony.algorithms are found in the section
"algorithms", and that parsimony.utils.consts is found in the
section "utils.consts", etc.
option : String. The option to read from the ini file section.
default : Object, but ideally a string. The default value to return if the
section or option doesn't exist.
Examples
--------
>>> import parsimony.config as config
>>>
>>> config.__config__.__flush_dry_run__ = True # Only for the doctests.
>>> config.set_option("test_section", "testing_get", "value")
>>> config.get_option("test_section", "testing_get")
'value'
"""
# if __config__ is None:
# if not __load_config__():
# return default
#
# section = str(section)
# option = str(option)
#
# if not __config__.has_section(section): # Subsumed by the below?
# return default
# if not __config__.has_option(section, option):
# return default
#
# value = __config__.get_option(section, option)
value = __config__.get_option(section, option, default=default)
return value
def get_boolean(section, option, default=False):
"""Fetches a boolean configuration option from a section of the ini file.
If not found, returns the default value False.
Parameters
----------
section : String. The section of the ini file to read from. Try to make the
sections correspond to packages (sans the parsimony prefix), such
that settings for parsimony.algorithms are found in the section
"algorithms", and that parsimony.utils.consts is found in the
section "utils.consts", etc.
option : String. The boolean option to read from the ini file section.
default : Boolean. The default value to return if the section or option
does not exist. Default is False.
Examples
--------
>>> import parsimony.config as config
>>>
>>> config.__config__.__flush_dry_run__ = True # Only for the doctests.
>>> config.set_option("test_section", "testing_get_boolean", "False")
>>> config.get_option("test_section", "testing_get_boolean")
'False'
>>> config.get_boolean("test_section", "testing_get_boolean")
False
>>> config.set_option("test_section", "testing_get_boolean", 0)
>>> config.get_boolean("test_section", "testing_get_boolean")
False
>>> config.set_option("test_section", "testing_get_boolean", 1)
>>> config.get_boolean("test_section", "testing_get_boolean")
True
>>> config.set_option("test_section", "testing_get_boolean", "off")
>>> config.get_boolean("test_section", "testing_get_boolean")
False
>>> config.set_option("test_section", "testing_get_boolean", "on")
>>> config.get_boolean("test_section", "testing_get_boolean")
True
>>> config.set_option("test_section", "testing_get_boolean", "no")
>>> config.get_boolean("test_section", "testing_get_boolean")
False
>>> config.set_option("test_section", "testing_get_boolean", "yes")
>>> config.get_boolean("test_section", "testing_get_boolean")
True
>>> config.get_boolean("test_section", "testing_non_existent", True)
True
"""
# if __config__ is None:
# if not __load_config__():
# return default
#
# section = str(section)
# option = str(option)
#
# if not __config__.has_section(section): # Subsumed by the below?
# return default
# if not __config__.has_option(section, option):
# return default
#
# value = __config__.getboolean(section, option)
value = __config__.get_boolean(section, option, default=default)
return value
def get_float(section, option, default=0.0):
"""Fetches a floating point configuration option from a section of the ini
file. If not found, returns the default value 0.0.
Parameters
----------
section : String. The section of the ini file to read from. Try to make the
sections correspond to packages (sans the parsimony prefix), such
that settings for parsimony.algorithms are found in the section
"algorithms", and that parsimony.utils.consts is found in the
section "utils.consts", etc.
option : String. The floating point option to read from the ini file
section.
default : Float. The default value to return if the section or option does
not exist. Default is 0.0.
Examples
--------
>>> import parsimony.config as config
>>>
>>> config.__config__.__flush_dry_run__ = True # Only for the doctests.
>>> config.set_option("test_section", "testing_get_float", "3.14159265358")
>>> config.get_option("test_section", "testing_get_float")
'3.14159265358'
>>> config.get_float("test_section", "testing_get_float")
3.14159265358
>>> config.get_float("test_section", "testing_non_existent", 2.71828182845)
2.71828182845
"""
# if __config__ is None:
# if not __load_config__():
# return default
#
# section = str(section)
# option = str(option)
#
# if not __config__.has_section(section): # Subsumed by the below?
# return default
# if not __config__.has_option(section, option):
# return default
#
# value = __config__.getfloat(section, option)
value = __config__.get_float(section, option, default=default)
return value
def get_int(section, option, default=0):
"""Fetches an integer configuration option from a section of the ini file.
If not found, returns the default value 0.
Parameters
----------
section : String. The section of the ini file to read from. Try to make the
sections correspond to packages (sans the parsimony prefix), such
that settings for parsimony.algorithms are found in the section
"algorithms", and that parsimony.utils.consts is found in the
section "utils.consts", etc.
option : String. The integer option to read from the ini file section.
default : Integer. The default value to return if the section or option
does not exist. Default is 0.
Examples
--------
>>> import parsimony.config as config
>>>
>>> config.__config__.__flush_dry_run__ = True # Only for the doctests.
>>> config.set_option("test_section", "testing_get_int", "11630")
>>> config.get_option("test_section", "testing_get_int")
'11630'
>>> config.get_int("test_section", "testing_get_int")
11630
>>> config.get_float("test_section", "testing_non_existent", 12407)
12407.0
"""
# if __config__ is None:
# if not __load_config__():
# return default
#
# section = str(section)
# option = str(option)
#
# if not __config__.has_section(section): # Subsumed by the below?
# return default
# if not __config__.has_option(section, option):
# return default
#
# value = __config__.getint(section, option)
value = __config__.get_int(section, option, default=default)
return value
def set_option(section, option, value, flush_file=False):
"""Sets a configuration option.
Parameters
----------
section : String. The section of the ini file to write to. Try to make the
sections correspond to packages, such that settings for
parsimony.algorithms are found in the section algorithms.
option : String. The option to write to the ini file section.
value : String. The value to write to the ini file section.
flush_file : Boolean. If true, saves the current configuration to disk.
Examples
--------
>>> import parsimony.config as config
>>>
>>> config.__config__.__flush_dry_run__ = True # Only for the doctests.
>>> config.set_option("test_section", "testing_set", "Theorem VI")
>>> config.get_option("test_section", "testing_set")
'Theorem VI'
"""
# if __config__ is None:
# __load_config__()
#
# section = str(section)
# option = str(option)
# value = str(value)
#
# if not __config__.has_section(section):
# __config__.add_section(section)
#
# __config__.set_option(section, option, value)
#
# if flush_file:
# flush()
__config__.set_option(section, option, value, flush_file=flush_file)
def flush():
"""Saves the current configuration to disk.
Examples
--------
>>> import parsimony.config as config
>>>
>>> config.__config__.__flush_dry_run__ = True # Only for the doctests.
>>> config.set_option("test_section", "testing_flush", "243000000")
>>> config.flush()
"""
# if __config__ is None:
# if not __load_config__():
# return # Nothing to save.
#
# fname = __ini_file_name__()
#
# if os.path.exists(fname):
# if not __flush_dry_run__:
# with open(fname, "wb") as fid:
# __config__.write(fid)
# else:
# warnings.warn("Could not locate the config file.", RuntimeWarning)
__config__.flush()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32.99596
| 79
| 0.638523
| 2,014
| 16,333
| 4.924032
| 0.107746
| 0.056368
| 0.052637
| 0.048704
| 0.830493
| 0.812443
| 0.782394
| 0.749723
| 0.724009
| 0.685086
| 0
| 0.011456
| 0.257148
| 16,333
| 494
| 80
| 33.062753
| 0.805901
| 0.655177
| 0
| 0.457627
| 0
| 0
| 0.043116
| 0
| 0
| 0
| 0
| 0.002024
| 0
| 1
| 0.127119
| false
| 0.008475
| 0.059322
| 0
| 0.279661
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2de38955f1b920209949c1223ac002fc2e0043b9
| 47
|
py
|
Python
|
workspace/module/maya-python-2.7/LxMaInterface/maIfMethods/__init__.py
|
no7hings/Lynxi
|
43c745198a714c2e5aca86c6d7a014adeeb9abf7
|
[
"MIT"
] | 2
|
2018-03-06T03:33:55.000Z
|
2019-03-26T03:25:11.000Z
|
workspace/module/maya-python-2.7/LxMaInterface/maIfMethods/__init__.py
|
no7hings/lynxi
|
43c745198a714c2e5aca86c6d7a014adeeb9abf7
|
[
"MIT"
] | null | null | null |
workspace/module/maya-python-2.7/LxMaInterface/maIfMethods/__init__.py
|
no7hings/lynxi
|
43c745198a714c2e5aca86c6d7a014adeeb9abf7
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from ._maIfMtdTreeview import *
| 15.666667
| 31
| 0.765957
| 6
| 47
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.12766
| 47
| 2
| 32
| 23.5
| 0.829268
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2de67f932686e333502a2eea3414df28c937a989
| 34
|
py
|
Python
|
game_engine/__init__.py
|
dferndz/r2d2
|
b2cb606a82c4ae4c267c7e6fc9d5e236c762b42e
|
[
"MIT"
] | 1
|
2020-09-06T06:29:35.000Z
|
2020-09-06T06:29:35.000Z
|
game_engine/__init__.py
|
dferndz/r2d2
|
b2cb606a82c4ae4c267c7e6fc9d5e236c762b42e
|
[
"MIT"
] | null | null | null |
game_engine/__init__.py
|
dferndz/r2d2
|
b2cb606a82c4ae4c267c7e6fc9d5e236c762b42e
|
[
"MIT"
] | null | null | null |
from game_engine.game import Game
| 17
| 33
| 0.852941
| 6
| 34
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
933f5d04dfa1ce3f6811c0d4423120f1a390b2d1
| 39
|
py
|
Python
|
terrainbento/derived_models/model_400_basicSa/__init__.py
|
mcflugen/terrainbento
|
1b756477b8a8ab6a8f1275b1b30ec84855c840ea
|
[
"MIT"
] | null | null | null |
terrainbento/derived_models/model_400_basicSa/__init__.py
|
mcflugen/terrainbento
|
1b756477b8a8ab6a8f1275b1b30ec84855c840ea
|
[
"MIT"
] | null | null | null |
terrainbento/derived_models/model_400_basicSa/__init__.py
|
mcflugen/terrainbento
|
1b756477b8a8ab6a8f1275b1b30ec84855c840ea
|
[
"MIT"
] | null | null | null |
from .model_400_basicSa import BasicSa
| 19.5
| 38
| 0.871795
| 6
| 39
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.102564
| 39
| 1
| 39
| 39
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9349391bd7b40f1c0f71f4121061bb4ffd595ac6
| 78
|
py
|
Python
|
tests/test_dp.py
|
slxiao/partition
|
53ff137b26816d64bf002baf269dbf97b601a3ca
|
[
"MIT"
] | 5
|
2019-11-22T08:34:12.000Z
|
2021-09-21T03:18:31.000Z
|
tests/test_dp.py
|
slxiao/partition
|
53ff137b26816d64bf002baf269dbf97b601a3ca
|
[
"MIT"
] | 3
|
2019-12-22T10:28:44.000Z
|
2021-10-09T19:14:31.000Z
|
tests/test_dp.py
|
slxiao/partition
|
53ff137b26816d64bf002baf269dbf97b601a3ca
|
[
"MIT"
] | 1
|
2020-12-01T15:31:30.000Z
|
2020-12-01T15:31:30.000Z
|
from partition import dp
def test_dp(numbers):
assert dp.dp(numbers) == 1
| 19.5
| 30
| 0.717949
| 13
| 78
| 4.230769
| 0.692308
| 0.327273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.179487
| 78
| 4
| 30
| 19.5
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
934d76435db6527ea3e3414daa3afad02f79c9c0
| 23
|
py
|
Python
|
exam_gen/exam/__init__.py
|
rohit507/exam_gen
|
b6e96955e1762fe8063282917fd69df420142cbb
|
[
"Apache-2.0"
] | null | null | null |
exam_gen/exam/__init__.py
|
rohit507/exam_gen
|
b6e96955e1762fe8063282917fd69df420142cbb
|
[
"Apache-2.0"
] | null | null | null |
exam_gen/exam/__init__.py
|
rohit507/exam_gen
|
b6e96955e1762fe8063282917fd69df420142cbb
|
[
"Apache-2.0"
] | null | null | null |
from .base import Exam
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
935b55dca4a21fa43df8252ad4c5b46700ba1164
| 1,319
|
py
|
Python
|
python/tests/generated/api/fieldset/test_copies.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 17
|
2019-04-15T21:03:37.000Z
|
2022-01-24T11:03:34.000Z
|
python/tests/generated/api/fieldset/test_copies.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 20
|
2019-03-13T23:23:40.000Z
|
2022-03-29T13:40:57.000Z
|
python/tests/generated/api/fieldset/test_copies.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 4
|
2019-04-15T21:18:03.000Z
|
2019-09-21T16:18:10.000Z
|
import enolib
def test_querying_four_entries_from_a_fieldset_all_of_them_copied_from_another_fieldset_produces_the_expected_result():
input = ("fieldset:\n"
"1 = 1\n"
"2 = 2\n"
"3 = 3\n"
"4 = 4\n"
"\n"
"copy < fieldset")
output = [entry.required_string_value() for entry in enolib.parse(input).fieldset('copy').entries()]
assert output == ['1', '2', '3', '4']
def test_querying_four_entries_from_a_fieldset_two_of_them_copied_from_another_fieldset_produces_the_expected_result():
input = ("fieldset:\n"
"1 = 1\n"
"2 = 2\n"
"\n"
"copy < fieldset\n"
"3 = 3\n"
"4 = 4")
output = [entry.required_string_value() for entry in enolib.parse(input).fieldset('copy').entries()]
assert output == ['1', '2', '3', '4']
def test_querying_three_entries_from_a_fieldset_one_owned_one_replaced_one_copied_produces_the_expected_result():
input = ("fieldset:\n"
"1 = 1\n"
"2 = 0\n"
"\n"
"copy < fieldset\n"
"2 = 2\n"
"3 = 3")
output = [entry.required_string_value() for entry in enolib.parse(input).fieldset('copy').entries()]
assert output == ['1', '2', '3']
| 32.975
| 119
| 0.56558
| 171
| 1,319
| 4.035088
| 0.239766
| 0.113043
| 0.065217
| 0.086957
| 0.875362
| 0.836232
| 0.811594
| 0.811594
| 0.72029
| 0.72029
| 0
| 0.037433
| 0.29113
| 1,319
| 40
| 120
| 32.975
| 0.700535
| 0
| 0
| 0.677419
| 0
| 0
| 0.144697
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.096774
| false
| 0
| 0.032258
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fad69d287860b7bd12d644265d3cf390818b532d
| 147
|
py
|
Python
|
backend/modules/camera/admin.py
|
crowdbotics-apps/my-new-app-31789
|
c5513ad2df9e73707871e1c10c6768a93690f9a7
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/modules/camera/admin.py
|
crowdbotics-apps/my-new-app-31789
|
c5513ad2df9e73707871e1c10c6768a93690f9a7
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/modules/camera/admin.py
|
crowdbotics-apps/my-new-app-31789
|
c5513ad2df9e73707871e1c10c6768a93690f9a7
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.contrib import admin
from .models import Image
class ImageAdmin(admin.ModelAdmin):
pass
admin.site.register(Image, ImageAdmin)
| 14.7
| 38
| 0.782313
| 19
| 147
| 6.052632
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 147
| 9
| 39
| 16.333333
| 0.912698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
fafd7fa1401fcb8a54d01929463ecea157ec77a7
| 43
|
py
|
Python
|
dis_snek/models/__init__.py
|
Astrea49/dis_snek
|
c899a6f1caa3c2a45323dbe50ed8ed62676be9d6
|
[
"MIT"
] | null | null | null |
dis_snek/models/__init__.py
|
Astrea49/dis_snek
|
c899a6f1caa3c2a45323dbe50ed8ed62676be9d6
|
[
"MIT"
] | null | null | null |
dis_snek/models/__init__.py
|
Astrea49/dis_snek
|
c899a6f1caa3c2a45323dbe50ed8ed62676be9d6
|
[
"MIT"
] | null | null | null |
from .discord import *
from .snek import *
| 14.333333
| 22
| 0.72093
| 6
| 43
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 23
| 21.5
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f07c925586465e06a86d3a27f97f950de54ac82
| 233
|
py
|
Python
|
server/lib/python/cartodb_services/cartodb_services/refactor/tools/redis_mock.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 22
|
2016-03-11T17:33:31.000Z
|
2021-02-22T04:00:43.000Z
|
server/lib/python/cartodb_services/cartodb_services/refactor/tools/redis_mock.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 338
|
2016-02-16T16:13:13.000Z
|
2022-03-30T15:50:17.000Z
|
server/lib/python/cartodb_services/cartodb_services/refactor/tools/redis_mock.py
|
CartoDB/dataservices-api
|
d0f28cc002ef11df9f371d5d1fd2d0901c245f97
|
[
"BSD-3-Clause"
] | 14
|
2016-09-22T15:29:33.000Z
|
2021-02-08T03:46:40.000Z
|
class RedisConnectionMock(object):
""" Simple class to mock a dummy behaviour for Redis related functions """
def zscore(self, redis_prefix, day):
pass
def zincrby(self, redis_prefix, day, amount):
pass
| 25.888889
| 78
| 0.67382
| 29
| 233
| 5.344828
| 0.724138
| 0.116129
| 0.193548
| 0.232258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240343
| 233
| 8
| 79
| 29.125
| 0.875706
| 0.283262
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
35703a4d7b8370977c06bb5bc60ba4db0e736a50
| 74
|
py
|
Python
|
steam/__init__.py
|
danielsuo/steam
|
1ff5efdddb3c464dcfedabc9c98e1a54be52850d
|
[
"MIT"
] | null | null | null |
steam/__init__.py
|
danielsuo/steam
|
1ff5efdddb3c464dcfedabc9c98e1a54be52850d
|
[
"MIT"
] | null | null | null |
steam/__init__.py
|
danielsuo/steam
|
1ff5efdddb3c464dcfedabc9c98e1a54be52850d
|
[
"MIT"
] | null | null | null |
from .constants import *
from .helmholtz import *
from .property import *
| 18.5
| 24
| 0.756757
| 9
| 74
| 6.222222
| 0.555556
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 3
| 25
| 24.666667
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ea574f4ffa33cd9df1b11f3d894535ca2c4afab7
| 112
|
py
|
Python
|
okl4_kernel/okl4_2.1.1-patch.9/tools/magpie-parsers/src/magpieparsers/gnuc/preprocessorinfochannel.py
|
CyberQueenMara/baseband-research
|
e1605537e10c37e161fff1a3416b908c9894f204
|
[
"MIT"
] | 77
|
2018-12-31T22:12:09.000Z
|
2021-12-31T22:56:13.000Z
|
okl4_kernel/okl4_2.1.1-patch.9/tools/magpie-parsers/src/magpieparsers/gnuc/preprocessorinfochannel.py
|
CyberQueenMara/baseband-research
|
e1605537e10c37e161fff1a3416b908c9894f204
|
[
"MIT"
] | null | null | null |
okl4_kernel/okl4_2.1.1-patch.9/tools/magpie-parsers/src/magpieparsers/gnuc/preprocessorinfochannel.py
|
CyberQueenMara/baseband-research
|
e1605537e10c37e161fff1a3416b908c9894f204
|
[
"MIT"
] | 24
|
2019-01-20T15:51:52.000Z
|
2021-12-25T18:29:13.000Z
|
# FIXME: Stub
class PreprocessorInfoChannel(object):
def addLineForTokenNumber(self, line, toknum):
pass
| 14
| 47
| 0.758929
| 11
| 112
| 7.727273
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151786
| 112
| 7
| 48
| 16
| 0.894737
| 0.098214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
ea83084b64a6d381e3702e2c27922b3860e439e8
| 8,576
|
py
|
Python
|
tamcolors/tests/tam_tools_tests/tam_text_box_tests.py
|
cmcmarrow/tamcolors
|
65a5f2455bbe35a739b98d14af158c3df7feb786
|
[
"Apache-2.0"
] | 29
|
2020-07-17T23:46:17.000Z
|
2022-02-06T05:36:44.000Z
|
tamcolors/tests/tam_tools_tests/tam_text_box_tests.py
|
sudo-nikhil/tamcolors
|
65a5f2455bbe35a739b98d14af158c3df7feb786
|
[
"Apache-2.0"
] | 42
|
2020-07-25T19:39:52.000Z
|
2021-02-24T01:19:58.000Z
|
tamcolors/tests/tam_tools_tests/tam_text_box_tests.py
|
sudo-nikhil/tamcolors
|
65a5f2455bbe35a739b98d14af158c3df7feb786
|
[
"Apache-2.0"
] | 8
|
2020-07-18T23:02:48.000Z
|
2020-12-30T04:07:35.000Z
|
# built in libraries
import unittest.mock
# tamcolors libraries
from tamcolors import tam_io
from tamcolors import tam_tools
from tamcolors.tam_io.tam_colors import *
class TAMTextBoxTests(unittest.TestCase):
def test_tam_text_box_init(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertIsInstance(text_box, tam_tools.tam_text_box.TAMTextBox)
def test_tam_text_box_str(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(str(text_box), "hello world!")
def test_tam_text_box_str_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("cat world!\n123", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(str(text_box), "cat world!\n123")
def test_update(self):
text_box = tam_tools.tam_text_box.TAMTextBox("", 20, 15, "#", YELLOW, PURPLE)
surface = tam_io.tam_surface.TAMSurface(20, 15, " ", YELLOW, PURPLE)
surface2 = tam_io.tam_surface.TAMSurface(20, 15, "@", RED, GREEN)
text_box.draw(surface2)
for i in range(20):
surface.set_spot(i, 0, "#", YELLOW, PURPLE)
surface.set_spot(i, 14, "#", YELLOW, PURPLE)
for i in range(1, 15):
surface.set_spot(0, i, "#", YELLOW, PURPLE)
surface.set_spot(19, i, "#", YELLOW, PURPLE)
text_box.update()
text_box.draw(surface2)
self.assertEqual(surface, surface2)
def test_draw(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 15, "#", YELLOW, PURPLE)
surface = tam_io.tam_surface.TAMSurface(20, 15, " ", YELLOW, PURPLE)
surface2 = tam_io.tam_surface.TAMSurface(20, 15, "@", RED, GREEN)
text_box.draw(surface2)
for i in range(20):
surface.set_spot(i, 0, "#", YELLOW, PURPLE)
surface.set_spot(i, 14, "#", YELLOW, PURPLE)
for i in range(1, 15):
surface.set_spot(0, i, "#", YELLOW, PURPLE)
surface.set_spot(19, i, "#", YELLOW, PURPLE)
for spot, char in enumerate("hello world!"):
surface.set_spot(2 + spot, 7, char, YELLOW, PURPLE)
text_box.update()
text_box.draw(surface2)
self.assertEqual(surface, surface2)
def test_draw_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 15, "#", YELLOW, PURPLE, clock=1)
surface = tam_io.tam_surface.TAMSurface(20, 15, " ", YELLOW, PURPLE)
surface2 = tam_io.tam_surface.TAMSurface(20, 15, "@", RED, GREEN)
text_box.draw(surface2)
self.assertEqual(surface, surface2)
for i in range(20):
surface.set_spot(i, 0, "#", YELLOW, PURPLE)
surface.set_spot(i, 14, "#", YELLOW, PURPLE)
for i in range(1, 15):
surface.set_spot(0, i, "#", YELLOW, PURPLE)
surface.set_spot(19, i, "#", YELLOW, PURPLE)
text_box.update()
text_box.draw(surface2)
self.assertEqual(surface, surface2)
for spot, char in enumerate("hello world!"):
surface.set_spot(2 + spot, 7, char, YELLOW, PURPLE)
text_box.update()
text_box.draw(surface2)
self.assertEqual(surface, surface2)
text_box.update()
text_box.draw(surface2)
self.assertEqual(surface, surface2)
def test_draw_3(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!\ncats\n1\n\nhi",
19,
16,
"#",
RED,
GREEN,
center_vertical=False,
center_horizontal=True,
vertical_space=2,
vertical_start=3,
char_background="%")
surface = tam_io.tam_surface.TAMSurface(19, 16, "%", RED, GREEN)
surface2 = tam_io.tam_surface.TAMSurface(19, 16, "@", YELLOW, BLUE)
for i in range(19):
surface.set_spot(i, 0, "#", RED, GREEN)
surface.set_spot(i, 15, "#", RED, GREEN)
for i in range(1, 16):
surface.set_spot(0, i, "#", RED, GREEN)
surface.set_spot(18, i, "#", RED, GREEN)
for spot, char in enumerate("hello world!"):
surface.set_spot(3 + spot, 3, char, RED, GREEN)
for spot, char in enumerate("cats"):
surface.set_spot(7 + spot, 5, char, RED, GREEN)
surface.set_spot(9, 7, "1", RED, GREEN)
for spot, char in enumerate("hi"):
surface.set_spot(8 + spot, 11, char, RED, GREEN)
text_box.update()
text_box.draw(surface2)
self.assertEqual(surface, surface2)
def test_done(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertTrue(text_box.done())
text_box.update()
self.assertTrue(text_box.done())
def test_done_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE, clock=1)
for _ in range(14):
self.assertFalse(text_box.done())
text_box.update()
self.assertTrue(text_box.done())
def test_set_colors(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertTrue(text_box.done())
text_box.update()
self.assertTrue(text_box.done())
text_box.set_colors(BLUE, AQUA)
self.assertTrue(text_box.done())
def test_set_colors_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE, clock=1)
for _ in range(14):
self.assertFalse(text_box.done())
text_box.update()
self.assertTrue(text_box.done())
text_box.set_colors(BLUE, AQUA)
self.assertTrue(text_box.done())
def test_set_colors_3(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE, clock=1)
for _ in range(13):
self.assertFalse(text_box.done())
text_box.update()
self.assertFalse(text_box.done())
text_box.set_colors(BLUE, AQUA)
self.assertFalse(text_box.done())
def test_get_colors(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(text_box.get_colors(), (YELLOW, PURPLE))
def test_get_colors_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", AQUA, RED)
self.assertEqual(text_box.get_colors(), (AQUA, RED))
def test_set_char(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "^", YELLOW, PURPLE)
text_box.set_char("#")
self.assertEqual(text_box.get_char(), "#")
def test_set_char_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "@", AQUA, RED)
text_box.set_char("$")
self.assertEqual(text_box.get_char(), "$")
def test_get_char(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(text_box.get_char(), "#")
def test_get_char_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "!", AQUA, RED)
self.assertEqual(text_box.get_char(), "!")
def test_get_text(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(text_box.get_text(), "hello world!")
def test_get_text_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("cat world!\n123", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(text_box.get_text(), "cat world!\n123")
def test_get_dimensions(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 20, 34, "#", YELLOW, PURPLE)
self.assertEqual(text_box.get_dimensions(), (20, 34))
def test_get_dimensions_2(self):
text_box = tam_tools.tam_text_box.TAMTextBox("hello world!", 4, 3, "#", YELLOW, PURPLE)
self.assertEqual(text_box.get_dimensions(), (4, 3))
| 40.45283
| 106
| 0.585938
| 1,104
| 8,576
| 4.314312
| 0.076993
| 0.145497
| 0.054587
| 0.072433
| 0.881377
| 0.840647
| 0.82658
| 0.797817
| 0.778501
| 0.748898
| 0
| 0.038593
| 0.2809
| 8,576
| 211
| 107
| 40.64455
| 0.733744
| 0.004431
| 0
| 0.546012
| 0
| 0
| 0.049561
| 0.00246
| 0
| 0
| 0
| 0
| 0.202454
| 1
| 0.134969
| false
| 0
| 0.02454
| 0
| 0.165644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ea9ccb460241bd5528e300962cfc2d963a9e9eb7
| 245
|
py
|
Python
|
rbac/ldap/daoex.py
|
shawnmckinney/py-fortress
|
ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf
|
[
"Apache-2.0"
] | 16
|
2018-03-19T02:19:01.000Z
|
2021-12-30T15:24:40.000Z
|
rbac/ldap/daoex.py
|
shawnmckinney/py-fortress
|
ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf
|
[
"Apache-2.0"
] | 1
|
2021-12-18T16:46:04.000Z
|
2021-12-18T16:46:04.000Z
|
rbac/ldap/daoex.py
|
shawnmckinney/py-fortress
|
ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf
|
[
"Apache-2.0"
] | 2
|
2018-03-14T21:48:43.000Z
|
2018-03-19T03:25:40.000Z
|
'''
@copyright: 2022 - Symas Corporation
'''
from rbac.util.fortress_error import RbacError
class LdapException(RbacError):
pass
class NotFound(RbacError):
pass
class NotUnique(RbacError):
pass
class InvalidCredentials(RbacError):
pass
| 13.611111
| 46
| 0.77551
| 27
| 245
| 7
| 0.62963
| 0.275132
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018779
| 0.130612
| 245
| 17
| 47
| 14.411765
| 0.868545
| 0.146939
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.444444
| 0.111111
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
57a298a146c80952a9429f88a91bcd6576ae486d
| 93
|
py
|
Python
|
src/plotshapes/__init__.py
|
sarang-IITKgp/plot-shapes
|
33aff54515eabd55afe42bf0091395dc3e6e6829
|
[
"BSD-3-Clause"
] | null | null | null |
src/plotshapes/__init__.py
|
sarang-IITKgp/plot-shapes
|
33aff54515eabd55afe42bf0091395dc3e6e6829
|
[
"BSD-3-Clause"
] | null | null | null |
src/plotshapes/__init__.py
|
sarang-IITKgp/plot-shapes
|
33aff54515eabd55afe42bf0091395dc3e6e6829
|
[
"BSD-3-Clause"
] | null | null | null |
import plotshapes.conic_sections
import plotshapes.quadrilateral
import plotshapes.transform
| 23.25
| 32
| 0.903226
| 10
| 93
| 8.3
| 0.6
| 0.578313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 93
| 3
| 33
| 31
| 0.954023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
57d8cf08100902923b5e2e85d8e5d3e727d014e8
| 4,165
|
py
|
Python
|
tests/functional/saltenv/ops/test_func_pin_current_version.py
|
eitrtechnologies/saltenv
|
66add964657fe270ed96ddfe50802e27539a6526
|
[
"Apache-2.0"
] | 5
|
2022-03-25T17:15:04.000Z
|
2022-03-28T23:24:26.000Z
|
tests/functional/saltenv/ops/test_func_pin_current_version.py
|
eitrtechnologies/saltenv
|
66add964657fe270ed96ddfe50802e27539a6526
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/saltenv/ops/test_func_pin_current_version.py
|
eitrtechnologies/saltenv
|
66add964657fe270ed96ddfe50802e27539a6526
|
[
"Apache-2.0"
] | 2
|
2022-03-26T06:33:30.000Z
|
2022-03-29T19:43:50.000Z
|
from unittest.mock import patch
async def test_func_pin_current_version_no_active_version(mock_hub, hub, tmp_path):
"""
SCENARIO #1:
- There is no active version
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.pin_current_version = hub.saltenv.ops.pin_current_version
# Mock the return of get_current_version to be ("",""), meaning that
# there is no active version
mock_hub.saltenv.ops.get_current_version.return_value = ("", "")
# Check that pin_current_version return False AND
# that the override_version_file version IS NOT CHANGED
with patch("os.getcwd") as mocked_override_dir:
# Set up the mocked_override_file
mocked_override_dir.return_value = tmp_path
mocked_override_dir.mkdir()
mocked_override_file = tmp_path / ".salt-version"
existing_override_version = "3004"
mocked_override_file.write_text("3004")
# Confirm the return is False
expected = False
actual = await mock_hub.saltenv.ops.pin_current_version()
actual == expected
# Confirm that the mocked_override_file is unchanged
assert mocked_override_file.read_text() == existing_override_version
async def test_func_pin_current_version_active_version_matches_override(mock_hub, hub, tmp_path):
"""
SCENARIO #2:
- There is an active version
- The active version matches the version in the override file.
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.pin_current_version = hub.saltenv.ops.pin_current_version
# Mock the return of get_current_version to be ("3004", tmp_path/version), where
# tmp_path/version is the main version file. The main version file would have 3004 as its value.
existing_override_version = "3004"
mock_hub.saltenv.ops.get_current_version.return_value = (
existing_override_version,
str(tmp_path / "version"),
)
# Check that pin_current_version return True AND that the
# override_version_file version IS NOT CHANGED
with patch("os.getcwd") as mocked_override_dir:
# Set up the mocked_override_file
mocked_override_dir.return_value = tmp_path
mocked_override_dir.mkdir()
mocked_override_file = tmp_path / ".salt-version"
mocked_override_file.write_text(existing_override_version)
# Confirm the return is True
expected = True
actual = await mock_hub.saltenv.ops.pin_current_version()
assert actual == expected
# Confirm that the mocked_override_file is unchanged
assert mocked_override_file.read_text() == existing_override_version
async def test_func_pin_current_version_active_version_does_not_match_override(
mock_hub, hub, tmp_path
):
"""
SCENARIO #3:
- There is an active version
- The active version does not match the version in the override file.
"""
# Link the function to the mock_hub
mock_hub.saltenv.ops.pin_current_version = hub.saltenv.ops.pin_current_version
# Mock the return of get_current_version to be ("3004", tmp_path/version), where
# tmp_path/version is the main version file. The main version file would have 3004 as its value.
updated_override_version = "3004"
mock_hub.saltenv.ops.get_current_version.return_value = (
updated_override_version,
str(tmp_path / "version"),
)
# Check that pin_current_version return True AND that the
# override_version_file version IS CHANGED
with patch("os.getcwd") as mocked_override_dir:
# Set up the mocked_override_file
mocked_override_dir.return_value = tmp_path
mocked_override_dir.mkdir()
mocked_override_file = tmp_path / ".salt-version"
existing_override_version = "3003"
mocked_override_file.write_text(existing_override_version)
# Confirm the return is True
expected = True
actual = await mock_hub.saltenv.ops.pin_current_version()
assert actual == expected
# Confirm that the mocked_override_file is unchanged
assert mocked_override_file.read_text() == updated_override_version
| 39.292453
| 100
| 0.718607
| 569
| 4,165
| 4.945518
| 0.137083
| 0.119403
| 0.090618
| 0.054371
| 0.943141
| 0.904051
| 0.883795
| 0.848614
| 0.821606
| 0.789623
| 0
| 0.01193
| 0.215126
| 4,165
| 105
| 101
| 39.666667
| 0.848883
| 0.281873
| 0
| 0.653061
| 0
| 0
| 0.03785
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 1
| 0
| false
| 0
| 0.020408
| 0
| 0.020408
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
17cd989d97fa54e84249c7d4c7bc186aca87ed41
| 35
|
py
|
Python
|
nodebox/sound/__init__.py
|
pepsipepsi/nodebox_opengl_python3
|
cfb2633df1055a028672b11311603cc2241a1378
|
[
"BSD-3-Clause"
] | 1
|
2017-03-19T16:56:46.000Z
|
2017-03-19T16:56:46.000Z
|
nodebox/sound/__init__.py
|
pepsipepsi/nodebox_opengl_python3
|
cfb2633df1055a028672b11311603cc2241a1378
|
[
"BSD-3-Clause"
] | null | null | null |
nodebox/sound/__init__.py
|
pepsipepsi/nodebox_opengl_python3
|
cfb2633df1055a028672b11311603cc2241a1378
|
[
"BSD-3-Clause"
] | null | null | null |
from nodebox.sound.process import *
| 35
| 35
| 0.828571
| 5
| 35
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
17cf54093d3cb7ed30cf7d05f27c647af87fcb1b
| 74
|
py
|
Python
|
pycrtsh/__init__.py
|
Te-k/crtshy
|
cf3c1d6a4ee9ec1e102e2e0cb730eca8625cb2c8
|
[
"MIT"
] | 22
|
2017-10-31T21:07:48.000Z
|
2022-03-30T02:15:36.000Z
|
pycrtsh/__init__.py
|
Te-k/crtshy
|
cf3c1d6a4ee9ec1e102e2e0cb730eca8625cb2c8
|
[
"MIT"
] | 11
|
2019-07-12T17:15:36.000Z
|
2022-01-07T15:57:55.000Z
|
pycrtsh/__init__.py
|
Te-k/crtshy
|
cf3c1d6a4ee9ec1e102e2e0cb730eca8625cb2c8
|
[
"MIT"
] | 10
|
2019-07-11T12:33:29.000Z
|
2021-07-20T08:18:10.000Z
|
from .api import Crtsh, CrtshInvalidRequestType, CrtshCertificateNotFound
| 37
| 73
| 0.878378
| 6
| 74
| 10.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 74
| 1
| 74
| 74
| 0.955882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aa09ece5b1c35b347bccc6ea4782f26b067f9620
| 131
|
py
|
Python
|
anyway/widgets/all_locations_widgets/__init__.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | 1
|
2022-01-19T18:23:03.000Z
|
2022-01-19T18:23:03.000Z
|
anyway/widgets/all_locations_widgets/__init__.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | null | null | null |
anyway/widgets/all_locations_widgets/__init__.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | null | null | null |
from . import (
accident_count_by_severity_widget,
most_severe_accidents_widget,
most_severe_accidents_table_widget,
)
| 21.833333
| 39
| 0.801527
| 16
| 131
| 5.875
| 0.6875
| 0.212766
| 0.340426
| 0.531915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 131
| 5
| 40
| 26.2
| 0.846847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a4c1080a48cc641cbb576553dedba1fdfdbcd416
| 262
|
py
|
Python
|
kanoodlegenius2d/app.py
|
wkeeling/kanoodlegenius2d
|
11d18f3809213cc4d80e56cbcab3e418fc39b365
|
[
"MIT"
] | null | null | null |
kanoodlegenius2d/app.py
|
wkeeling/kanoodlegenius2d
|
11d18f3809213cc4d80e56cbcab3e418fc39b365
|
[
"MIT"
] | null | null | null |
kanoodlegenius2d/app.py
|
wkeeling/kanoodlegenius2d
|
11d18f3809213cc4d80e56cbcab3e418fc39b365
|
[
"MIT"
] | null | null | null |
from kanoodlegenius2d.domain import models
from kanoodlegenius2d.ui import fonts, settings
from kanoodlegenius2d.ui.masterscreen import MasterScreen
def main():
fonts.initialise()
settings.initialise()
models.initialise()
return MasterScreen()
| 23.818182
| 57
| 0.782443
| 27
| 262
| 7.592593
| 0.481481
| 0.292683
| 0.214634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013393
| 0.145038
| 262
| 10
| 58
| 26.2
| 0.901786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0.375
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a4eadc00c01e5d4823704e7c3ad76f76e7a725f3
| 7,841
|
py
|
Python
|
z2/part2/interactive/jm/random_normal_1/637218507.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part2/interactive/jm/random_normal_1/637218507.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part2/interactive/jm/random_normal_1/637218507.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 637218507
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 7, 5, 10)
assert board is not None
assert gamma_move(board, 1, 2, 5) == 1
assert gamma_move(board, 1, 1, 0) == 1
board508740979 = gamma_board(board)
assert board508740979 is not None
assert board508740979 == ("......\n"
"..1...\n"
"......\n"
"......\n"
"......\n"
"......\n"
".1....\n")
del board508740979
board508740979 = None
assert gamma_move(board, 3, 4, 2) == 1
assert gamma_move(board, 4, 3, 2) == 1
assert gamma_move(board, 5, 6, 4) == 0
assert gamma_busy_fields(board, 5) == 0
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 2, 5, 3) == 1
assert gamma_move(board, 2, 4, 1) == 1
assert gamma_free_fields(board, 2) == 35
assert gamma_move(board, 3, 1, 5) == 1
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 4, 4, 3) == 1
board109190569 = gamma_board(board)
assert board109190569 is not None
assert board109190569 == ("......\n"
".31...\n"
"......\n"
"..1.42\n"
"...43.\n"
".3..2.\n"
".1....\n")
del board109190569
board109190569 = None
assert gamma_move(board, 5, 0, 2) == 1
assert gamma_busy_fields(board, 5) == 1
assert gamma_move(board, 1, 6, 0) == 0
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_move(board, 2, 5, 3) == 0
assert gamma_move(board, 2, 3, 4) == 1
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 3, 2, 5) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 2, 4) == 1
assert gamma_move(board, 5, 4, 0) == 1
assert gamma_move(board, 5, 0, 5) == 1
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 3, 0, 6) == 1
assert gamma_busy_fields(board, 3) == 4
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 4, 4, 0) == 0
board837255014 = gamma_board(board)
assert board837255014 is not None
assert board837255014 == ("3.....\n"
"531...\n"
"..42..\n"
"..1.42\n"
"5.243.\n"
"13..2.\n"
".1..5.\n")
del board837255014
board837255014 = None
assert gamma_move(board, 5, 4, 4) == 1
assert gamma_move(board, 5, 5, 5) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_golden_move(board, 1, 5, 5) == 1
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_busy_fields(board, 2) == 5
assert gamma_move(board, 3, 6, 2) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 4, 3, 3) == 1
assert gamma_move(board, 5, 6, 3) == 0
assert gamma_move(board, 5, 3, 4) == 0
assert gamma_move(board, 1, 0, 4) == 1
assert gamma_golden_move(board, 1, 0, 4) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_free_fields(board, 3) == 18
assert gamma_move(board, 4, 6, 4) == 0
assert gamma_move(board, 4, 4, 1) == 0
assert gamma_move(board, 5, 2, 1) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 1, 5, 4) == 1
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 6, 3) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_golden_move(board, 3, 1, 4) == 0
assert gamma_move(board, 4, 2, 1) == 0
assert gamma_move(board, 4, 1, 6) == 1
assert gamma_free_fields(board, 4) == 14
assert gamma_move(board, 5, 5, 4) == 0
assert gamma_free_fields(board, 5) == 14
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_free_fields(board, 2) == 14
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 3, 1, 0) == 0
board815442504 = gamma_board(board)
assert board815442504 is not None
assert board815442504 == ("34....\n"
"531..1\n"
"1.4251\n"
"..1442\n"
"5.243.\n"
"135322\n"
"21..5.\n")
del board815442504
board815442504 = None
assert gamma_move(board, 4, 2, 6) == 1
assert gamma_move(board, 5, 0, 3) == 1
assert gamma_move(board, 5, 4, 6) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 3, 5) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_golden_move(board, 2, 4, 2) == 1
assert gamma_move(board, 3, 2, 5) == 0
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 5, 1, 6) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 4, 6) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_golden_move(board, 3, 4, 4) == 1
assert gamma_move(board, 4, 2, 5) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_free_fields(board, 1) == 8
assert gamma_golden_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 1, 5) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 4, 6, 3) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 1, 2) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 3, 5, 2) == 1
assert gamma_move(board, 3, 4, 6) == 0
assert gamma_move(board, 4, 4, 1) == 0
assert gamma_golden_move(board, 4, 1, 0) == 1
assert gamma_move(board, 5, 5, 4) == 0
assert gamma_move(board, 5, 5, 6) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_golden_possible(board, 3) == 0
board721229061 = gamma_board(board)
assert board721229061 is not None
assert board721229061 == ("344.55\n"
"5311.1\n"
"124231\n"
"5.1442\n"
"522423\n"
"135322\n"
"24.15.\n")
del board721229061
board721229061 = None
assert gamma_move(board, 4, 0, 0) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 5, 0, 5) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_golden_move(board, 2, 5, 3) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 4, 5, 4) == 0
assert gamma_busy_fields(board, 4) == 7
assert gamma_move(board, 5, 3, 6) == 1
assert gamma_move(board, 5, 5, 0) == 1
assert gamma_busy_fields(board, 5) == 9
assert gamma_free_fields(board, 5) == 3
assert gamma_golden_move(board, 5, 6, 0) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 8
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_golden_move(board, 3, 2, 4) == 0
assert gamma_move(board, 4, 0, 2) == 0
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_move(board, 5, 5, 4) == 0
assert gamma_golden_move(board, 5, 1, 0) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_golden_move(board, 3, 6, 1) == 0
assert gamma_move(board, 4, 0, 2) == 0
assert gamma_move(board, 5, 1, 0) == 0
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_move(board, 2, 4, 5) == 1
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 3, 5, 0) == 0
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 4, 0, 2) == 0
gamma_delete(board)
| 31.744939
| 46
| 0.653105
| 1,432
| 7,841
| 3.424581
| 0.044693
| 0.349918
| 0.354812
| 0.473083
| 0.808728
| 0.804853
| 0.6823
| 0.376835
| 0.267537
| 0.259788
| 0
| 0.140775
| 0.177401
| 7,841
| 246
| 47
| 31.873984
| 0.619535
| 0
| 0
| 0.209821
| 0
| 0
| 0.036115
| 0
| 0
| 0
| 0
| 0
| 0.745536
| 1
| 0
| false
| 0
| 0.004464
| 0
| 0.004464
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a4f1b2954bab8eb8dc86f7ef12bbbefe9369af74
| 4,356
|
py
|
Python
|
tests/test_upower.py
|
listuser/jc
|
3ac8d0362b4fb9999fc55a60a9cb20ac80d114f7
|
[
"MIT"
] | 3,215
|
2019-10-24T15:25:56.000Z
|
2022-03-31T15:43:01.000Z
|
tests/test_upower.py
|
listuser/jc
|
3ac8d0362b4fb9999fc55a60a9cb20ac80d114f7
|
[
"MIT"
] | 109
|
2019-11-02T16:22:29.000Z
|
2022-03-30T17:32:17.000Z
|
tests/test_upower.py
|
listuser/jc
|
3ac8d0362b4fb9999fc55a60a9cb20ac80d114f7
|
[
"MIT"
] | 75
|
2020-02-07T00:16:32.000Z
|
2022-03-29T09:29:53.000Z
|
import os
import sys
import time
import json
import unittest
import jc.parsers.upower
# Absolute directory of this test file; fixture paths are resolved
# relative to its parent directory in setUp().
THIS_DIR = os.path.dirname(os.path.abspath(__file__))

# Set the timezone on POSIX systems. Need to manually set for Windows tests
if not sys.platform.startswith('win32'):
    os.environ['TZ'] = 'America/Los_Angeles'
    time.tzset()
class MyTests(unittest.TestCase):
    """Tests for the jc 'upower' parser against recorded fixtures."""

    # attribute name -> fixture path relative to the tests directory,
    # without extension.  For each entry, '<path>.out' holds raw command
    # output (loaded as self.<name>) and '<path>.json' holds the expected
    # parsed structure (loaded as self.<name>_json).
    _FIXTURES = {
        'ubuntu_18_4_upower_i': 'tests/fixtures/ubuntu-18.04/upower-i',
        'ubuntu_18_4_upower_d': 'tests/fixtures/ubuntu-18.04/upower-d',
        'ubuntu_18_4_upower_d_clocale': 'tests/fixtures/ubuntu-18.04/upower-d-clocale',
        'generic_upower_i_utc': 'tests/fixtures/generic/upower-i-utc',
        'generic_upower_i_non_utc': 'tests/fixtures/generic/upower-i-non-utc',
        'generic_upower_i_c_locale': 'tests/fixtures/generic/upower-i-c-locale',
    }

    def setUp(self):
        """Load every input/expected-output fixture pair."""
        for attr, rel_path in self._FIXTURES.items():
            with open(os.path.join(THIS_DIR, os.pardir, rel_path + '.out'), 'r', encoding='utf-8') as f:
                setattr(self, attr, f.read())
            with open(os.path.join(THIS_DIR, os.pardir, rel_path + '.json'), 'r', encoding='utf-8') as f:
                setattr(self, attr + '_json', json.loads(f.read()))

    def test_upower_nodata(self):
        """
        Test 'upower' with no data
        """
        self.assertEqual(jc.parsers.upower.parse('', quiet=True), [])

    def test_upower_i_ubuntu_18_4(self):
        """
        Test 'upower -i' on Ubuntu 18.4
        """
        self.assertEqual(jc.parsers.upower.parse(self.ubuntu_18_4_upower_i, quiet=True), self.ubuntu_18_4_upower_i_json)

    def test_upower_d_ubuntu_18_4(self):
        """
        Test 'upower -d' on Ubuntu 18.4 using LANG=en_US.UTF-8
        """
        self.assertEqual(jc.parsers.upower.parse(self.ubuntu_18_4_upower_d, quiet=True), self.ubuntu_18_4_upower_d_json)

    def test_upower_d_clocale_ubuntu_18_4(self):
        """
        Test 'upower -d' on Ubuntu 18.4 using LANG=C
        """
        # Bug fix: this test previously re-parsed the en_US.UTF-8 fixture
        # (duplicating test_upower_d_ubuntu_18_4) and never exercised the
        # C-locale fixtures that setUp loads.
        self.assertEqual(jc.parsers.upower.parse(self.ubuntu_18_4_upower_d_clocale, quiet=True), self.ubuntu_18_4_upower_d_clocale_json)

    def test_upower_i_utc_generic(self):
        """
        Test 'upower -i' with utc time output
        """
        self.assertEqual(jc.parsers.upower.parse(self.generic_upower_i_utc, quiet=True), self.generic_upower_i_utc_json)

    def test_upower_i_non_utc_generic(self):
        """
        Test 'upower -i' with non-utc time output
        """
        self.assertEqual(jc.parsers.upower.parse(self.generic_upower_i_non_utc, quiet=True), self.generic_upower_i_non_utc_json)

    def test_upower_i_c_locale(self):
        """
        Test 'upower -i' with LANG=C time output
        """
        self.assertEqual(jc.parsers.upower.parse(self.generic_upower_i_c_locale, quiet=True), self.generic_upower_i_c_locale_json)
# Allow running this test module directly (python test_upower.py).
if __name__ == '__main__':
    unittest.main()
| 42.705882
| 134
| 0.664141
| 696
| 4,356
| 3.920977
| 0.123563
| 0.082081
| 0.059362
| 0.061561
| 0.843532
| 0.825577
| 0.78417
| 0.713082
| 0.713082
| 0.713082
| 0
| 0.026353
| 0.189853
| 4,356
| 101
| 135
| 43.128713
| 0.746954
| 0.084252
| 0
| 0.038462
| 0
| 0
| 0.162092
| 0.134379
| 0
| 0
| 0
| 0
| 0.134615
| 1
| 0.153846
| false
| 0
| 0.115385
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
35282cd4dcca91264219958e252431f0a5e67a73
| 131
|
py
|
Python
|
tests/conftest.py
|
dseuss/pypllon
|
f9ae6104555837fe0eb7d7c333ebc2ed585d314a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
dseuss/pypllon
|
f9ae6104555837fe0eb7d7c333ebc2ed585d314a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
dseuss/pypllon
|
f9ae6104555837fe0eb7d7c333ebc2ed585d314a
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest as pt
@pt.fixture(scope="module")
def rgen():
return np.random.RandomState(seed=3476583865)
| 16.375
| 49
| 0.740458
| 20
| 131
| 4.85
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 0.145038
| 131
| 7
| 50
| 18.714286
| 0.776786
| 0
| 0
| 0
| 0
| 0
| 0.045802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1057320ca1a0eb1c9ddd59c375419b8e9d5482a8
| 357
|
py
|
Python
|
backend/mmlp/endpoint/result/ResultCount.py
|
magreiner/MMLP
|
23113866d8d0062c8c0e54c7fa5a0bbd0fa15f4e
|
[
"Apache-2.0"
] | null | null | null |
backend/mmlp/endpoint/result/ResultCount.py
|
magreiner/MMLP
|
23113866d8d0062c8c0e54c7fa5a0bbd0fa15f4e
|
[
"Apache-2.0"
] | null | null | null |
backend/mmlp/endpoint/result/ResultCount.py
|
magreiner/MMLP
|
23113866d8d0062c8c0e54c7fa5a0bbd0fa15f4e
|
[
"Apache-2.0"
] | null | null | null |
import json
from falcon import Request, Response
from mmlp.manager import ResultManager
class ResultCount:
    """Falcon resource reporting how many results are stored."""

    def __init__(self, result_manager: ResultManager):
        # Manager queried on every GET for the current result count.
        self._result_manager: ResultManager = result_manager

    def on_get(self, _: Request, resp: Response):
        """Respond with a JSON object of the form {"count": <int>}."""
        total = self._result_manager.result_count()
        resp.body = json.dumps({'count': total})
| 27.461538
| 79
| 0.753501
| 44
| 357
| 5.818182
| 0.5
| 0.203125
| 0.199219
| 0.234375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162465
| 357
| 12
| 80
| 29.75
| 0.856187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
109e7549ca7ca5e402ec3e03c1d217eb79343001
| 166
|
py
|
Python
|
ex14.py
|
wellingtonn96/Revisao_LP2
|
6cd23606633b07db5a984666b12b2dc2193e799d
|
[
"Apache-2.0"
] | null | null | null |
ex14.py
|
wellingtonn96/Revisao_LP2
|
6cd23606633b07db5a984666b12b2dc2193e799d
|
[
"Apache-2.0"
] | null | null | null |
ex14.py
|
wellingtonn96/Revisao_LP2
|
6cd23606633b07db5a984666b12b2dc2193e799d
|
[
"Apache-2.0"
] | null | null | null |
from funcoes import exercicio_tupla
# Unpack the tuple returned by the exercise helper for the number 945.
# NOTE(review): presumably (hundreds, tens, units) digits -- confirm in funcoes.
centenas, dezenas, unidades = exercicio_tupla(945)
print('centena: %d, dezena: %d, unidade: %d' % (centenas, dezenas, unidades))
| 33.2
| 77
| 0.746988
| 21
| 166
| 5.809524
| 0.666667
| 0.229508
| 0.377049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020548
| 0.120482
| 166
| 4
| 78
| 41.5
| 0.815068
| 0
| 0
| 0
| 0
| 0
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
529e944ddd640e80ec194142f894ace2e9141428
| 161
|
py
|
Python
|
views/shop.py
|
Usamaiqbal789/Flask
|
b0a3c0be63fb88cfe020e116b37d73261c7bcab1
|
[
"MIT"
] | null | null | null |
views/shop.py
|
Usamaiqbal789/Flask
|
b0a3c0be63fb88cfe020e116b37d73261c7bcab1
|
[
"MIT"
] | null | null | null |
views/shop.py
|
Usamaiqbal789/Flask
|
b0a3c0be63fb88cfe020e116b37d73261c7bcab1
|
[
"MIT"
] | 1
|
2021-10-14T19:14:09.000Z
|
2021-10-14T19:14:09.000Z
|
from flask import Blueprint, render_template
# Blueprint grouping the shop routes; registered with the app elsewhere.
shop = Blueprint('shop', __name__)

@shop.route('/shop')
def shop_page():
    """Render the shop page template for GET /shop."""
    return render_template("shop.html")
| 17.888889
| 44
| 0.732919
| 21
| 161
| 5.285714
| 0.619048
| 0.252252
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 161
| 8
| 45
| 20.125
| 0.792857
| 0
| 0
| 0
| 0
| 0
| 0.111801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
52f8a198bbf1908d77d74d41949bf1ffbffdcae1
| 38
|
py
|
Python
|
katas/beta/find_the_gcf_of_two_numbers.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/beta/find_the_gcf_of_two_numbers.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/beta/find_the_gcf_of_two_numbers.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
# fractions.gcd was deprecated in Python 3.5 and removed in 3.9;
# math.gcd is the supported replacement.
from math import gcd as find_GCF
| 19
| 37
| 0.842105
| 7
| 38
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 1
| 38
| 38
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dc1fa3efb29a27feac2eca0949cd7b10e1578962
| 36
|
py
|
Python
|
otscrape/core/exporter/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
otscrape/core/exporter/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
otscrape/core/exporter/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
from .file.json import JSONExporter
| 18
| 35
| 0.833333
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dc52f0b09edcc582395d1f1fa0265f2bfa01f3c9
| 27
|
py
|
Python
|
src/sage/tensor/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/tensor/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/tensor/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
from .modules.all import *
| 13.5
| 26
| 0.740741
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dc5639380e32090db9b3fc5d1dffcf4c9eed3c4a
| 98
|
py
|
Python
|
tests/archives/extension.py
|
4nm1tsu/uncompressor
|
706eb7500ee576fe7e7a6d610dc78bfa837ea8bf
|
[
"MIT"
] | 1
|
2021-11-08T01:52:58.000Z
|
2021-11-08T01:52:58.000Z
|
tests/archives/extension.py
|
4nm1tsu/uncompressor
|
706eb7500ee576fe7e7a6d610dc78bfa837ea8bf
|
[
"MIT"
] | null | null | null |
tests/archives/extension.py
|
4nm1tsu/uncompressor
|
706eb7500ee576fe7e7a6d610dc78bfa837ea8bf
|
[
"MIT"
] | null | null | null |
import mimetypes
# Look up the archive's MIME type, then map the type (index 0; index 1
# is the encoding) back to a canonical file extension and print it.
mime_type, _encoding = mimetypes.guess_type('./something.tar.xz')
print(mimetypes.guess_extension(mime_type))
| 24.5
| 79
| 0.795918
| 13
| 98
| 5.846154
| 0.769231
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.040816
| 98
| 3
| 80
| 32.666667
| 0.797872
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
f495a741cc3a9d4155a8adc96792d91041ff25cd
| 87
|
py
|
Python
|
src/metrics/__init__.py
|
ryanwongsa/image-inpainting
|
d20419f3260760f1deb96d2b904dd4de92eeee36
|
[
"BSD-3-Clause"
] | null | null | null |
src/metrics/__init__.py
|
ryanwongsa/image-inpainting
|
d20419f3260760f1deb96d2b904dd4de92eeee36
|
[
"BSD-3-Clause"
] | null | null | null |
src/metrics/__init__.py
|
ryanwongsa/image-inpainting
|
d20419f3260760f1deb96d2b904dd4de92eeee36
|
[
"BSD-3-Clause"
] | null | null | null |
from metrics.psnr_metric import PSNR_Metric
from metrics.loss_metric import Loss_Metric
| 43.5
| 43
| 0.896552
| 14
| 87
| 5.285714
| 0.428571
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08046
| 87
| 2
| 44
| 43.5
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f4b18b162035ab5378f719e556b732dd41a4cfdb
| 4,415
|
py
|
Python
|
export 3d pic /2p.py
|
aminoj/Interactive-Orbitals-Simulation
|
20e405d6a23028049c05f4a0fd73e51857ba9270
|
[
"Apache-2.0"
] | null | null | null |
export 3d pic /2p.py
|
aminoj/Interactive-Orbitals-Simulation
|
20e405d6a23028049c05f4a0fd73e51857ba9270
|
[
"Apache-2.0"
] | null | null | null |
export 3d pic /2p.py
|
aminoj/Interactive-Orbitals-Simulation
|
20e405d6a23028049c05f4a0fd73e51857ba9270
|
[
"Apache-2.0"
] | 1
|
2020-04-16T08:02:27.000Z
|
2020-04-16T08:02:27.000Z
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Surface sampling density shared by every patch.
rstride = 15
cstride = 15
MinBound = -3
MaxBound = 3
# Colour used for all surface patches.
ORBITAL_COLOR = (0, 0.44, 1)

# Spherical parameter grid used to generate every surface patch.
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)

# Circle-parameterised base coordinates reused by every patch
# (identical to the x2/y2, z4/y4, ... pairs of the original script).
cu = np.outer(np.cos(u), np.sin(v))
su = np.outer(np.sin(u), np.sin(v))


def _plot_patch(x, y, z):
    """Draw one surface patch with the shared style settings."""
    ax.plot_surface(x, y, z, rstride=rstride, cstride=cstride,
                    color=ORBITAL_COLOR, linewidth=0)


def _cone(a, b):
    """Inner 'cone' radial profile: sqrt((1.5a)^2 + (1.5b)^2 + 0.3)."""
    return ((1.5 * a) ** 2 + (1.5 * b) ** 2 + 0.3) ** 0.5


def _cover(a, b):
    """Outer 'cover' profile: sqrt(|(2a)^2 + (2b)^2 - 15|) - 1.7."""
    return (abs((2 * a) ** 2 + (2 * b) ** 2 - 15) ** 0.5) - 1.7


# Cone pairs (+/-) along the z, x and y axes.  Each call reproduces one
# of the duplicated "#Cone" sections of the original script.
_plot_patch(cu, su, _cone(cu, su))
_plot_patch(cu, su, -_cone(cu, su))
_plot_patch(_cone(cu, su), su, cu)
_plot_patch(-_cone(cu, su), su, cu)
_plot_patch(su, _cone(cu, su), cu)
_plot_patch(su, -_cone(cu, su), cu)

# Cover pairs (+/-) along the z, x and y axes (the original "#Cover"
# sections; the x7 section that reused x6/y6 held the same values as
# z7/y7, so unifying the arguments preserves the rendered output).
_plot_patch(cu, su, _cover(cu, su))
_plot_patch(cu, su, -_cover(cu, su))
_plot_patch(_cover(cu, su), su, cu)
_plot_patch(-_cover(cu, su), su, cu)
_plot_patch(cu, _cover(cu, su), su)
_plot_patch(cu, -_cover(cu, su), su)

# NOTE(review): plt.show() blocks until the window is closed, so the
# axis limits below only take effect for the saved frames -- preserved
# as in the original; confirm whether show() should come after set_*lim3d.
plt.show()
ax.set_xlim3d(MinBound, MaxBound)
ax.set_ylim3d(MinBound, MaxBound)
ax.set_zlim3d(MinBound, MaxBound)

# Sweep the camera and save 120 numbered frames; elevation steps up by
# 12 degrees every 20 frames while the azimuth restarts from 0.
e = 0
b = 0
for ii in range(120):  # bug fix: xrange does not exist in Python 3
    ax.view_init(elev=e, azim=b * 4)
    # %03d zero-pads to three digits, matching the original
    # movie00N/movie0NN/movieNNN branches exactly.
    plt.savefig("2p/movie%03d.png" % ii)
    if ii in (20, 40, 60, 80, 100):
        e = e + 12
        b = 0
    b = b + 1
| 32.703704
| 96
| 0.463194
| 723
| 4,415
| 2.803596
| 0.136929
| 0.088801
| 0.106561
| 0.082881
| 0.77257
| 0.77257
| 0.77257
| 0.768624
| 0.768624
| 0.727183
| 0
| 0.08813
| 0.110759
| 4,415
| 135
| 97
| 32.703704
| 0.428171
| 0.226954
| 0
| 0.493506
| 0
| 0
| 0.013864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038961
| 0
| 0.038961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f4e2b8386bdbedfc5c89a0d9305717c57989aa6b
| 14,564
|
py
|
Python
|
dqn/Model.py
|
Theling/Delayed_MDP
|
db1a8808a05917a5915220947cf65268f0524fa9
|
[
"MIT"
] | null | null | null |
dqn/Model.py
|
Theling/Delayed_MDP
|
db1a8808a05917a5915220947cf65268f0524fa9
|
[
"MIT"
] | null | null | null |
dqn/Model.py
|
Theling/Delayed_MDP
|
db1a8808a05917a5915220947cf65268f0524fa9
|
[
"MIT"
] | null | null | null |
# GRU simple normal nn, this model assumes the two elements in state space is uncorrelated.
import tensorflow as tf
import numpy as np
from collections import deque
from DRL_util import Grid
# When True, arrange_data() prints the assembled training targets.
DEBUG = False
class GRU_Model:
def __init__(self,
sess,
state_dim,
action_dim,
steps,
gru_outshape = 16,
pool_maxlen = 10000,
*args, **kwargs):
self.sess = sess
self.state_dim, self.action_dim, self.steps = state_dim, action_dim, steps
self.std_c = 1. # coefficient for variance loss
self.inputs = {}
self.outputs = {}
self.other_tensors = {}
self.build(gru_outshape)
self.tensors = {}
self.tensors.update(self.inputs)
self.tensors.update(self.outputs)
self.tensors.update(self.other_tensors)
init = tf.global_variables_initializer()
# self.sess.run(init)
self.is_trained = False
self.data_pool = {"states": deque(maxlen = pool_maxlen), "actions": deque(maxlen = pool_maxlen), "counter": 0}
self.pool_maxlen = pool_maxlen
def build(self, gru_outshape = 16):
actions = tf.keras.Input(shape=(self.steps,self.action_dim), name = "actions_ipt")
init_state = tf.keras.Input(shape=(self.state_dim,), name = "state_ipt")
self.inputs["actions"] = actions
self.inputs["init_state"] = init_state
cell_tensor = tf.keras.layers.Dense(gru_outshape, name = "transform_state_ipt", activation = "softmax")(init_state)
# mean
y = tf.keras.layers.GRU(gru_outshape, name = "mean_gru")(actions, initial_state = cell_tensor)
mean = tf.keras.layers.Dense(self.state_dim, name = "mean")(y)
# var
y = tf.keras.layers.GRU(gru_outshape, name = "var_gru")(actions, initial_state = cell_tensor)
var = tf.keras.layers.Dense(self.state_dim, name = "var", activation = "relu")(y)
self.outputs["mean"] = mean
self.outputs['var'] = var
print(self.inputs.values)
self.model = tf.keras.Model(inputs = list(self.inputs.values()), outputs = list(self.outputs.values()))
print(self.model.summary())
true_y = tf.keras.Input(shape=(self.state_dim, ))
target_var = (true_y**2-mean**2)
mean_loss = tf.losses.mean_squared_error(labels = true_y, predictions=mean)
var_loss = tf.losses.mean_squared_error(labels = target_var, predictions=var)
loss = mean_loss+ self.std_c*var_loss
updt = tf.train.AdamOptimizer(0.01).minimize(loss)
self.other_tensors["target_var"] = target_var
self.other_tensors["mean_loss"] = mean_loss
self.other_tensors['var_loss'] = var_loss
self.other_tensors["loss"] = loss
self.other_tensors["true_y"] = true_y
self.other_tensors["updt"] = updt
def get_tensors(self, keys):
return [self.tensors[key] for key in keys]
def train(self, iters = 10000):
data = self.arrange_data()
n_update = 10
n_iter = int(iters/n_update)
for _ in range(n_iter):
self.do_training(data['train_s'], data['train_as'], data['train_es'], n_update)
val_loss = self.validate(data["test_s"], data['test_as'], data['test_es'])
print(f"val_loss: {val_loss}")
def do_training(self, train_s, train_as, train_es , train_iter):
updt, loss, actions, true_y, init_state = self.get_tensors(["updt", 'loss', 'actions', 'true_y', 'init_state'])
for _ in range(train_iter):
_, l_ = self.sess.run((updt, loss), feed_dict={actions: train_as, true_y : train_es, init_state: train_s})
print(f'Training finished: {train_iter} updates.')
self.is_trained = True
return l_
def validate(self, train_s, train_as, train_es):
updt, loss, actions, true_y, init_state = self.get_tensors(["updt", 'loss', 'actions', 'true_y', 'init_state'])
l_ = self.sess.run(loss, feed_dict={actions: train_as, true_y : train_es, init_state: train_s})
return l_
def predict(self, s, as_):
if self.is_trained:
m, var = self.sess.run((self.tensors["mean"], self.tensors["var"]), feed_dict={self.tensors["actions"]: as_,
self.tensors["init_state"]: s})
c = np.sqrt(var)
rnds = np.random.normal(size = (len(m), self.state_dim))
#print(m.shape, c.shape, rnds.shape)
return m+rnds*c
else:
raise
def run(self, keys, feed_dict):
return sess.run([self.tensors[k] for k in keys], feed_dict={self.tensors[k]: feed_dict[k] for k in feed_dict})
def add_data(self, state, action):
    """Append one (state, action) observation to the replay pool."""
    pool = self.data_pool
    pool["states"].append(state)
    pool["actions"].append(action)
    # The bounded deques evict old entries on their own; keep the counter
    # capped at the pool capacity to match.
    pool["counter"] = min(pool["counter"] + 1, self.pool_maxlen)
def arrange_data(self, val_ratio = 0.2):
    """Assemble the replay pool into train/validation arrays.

    Args:
        val_ratio: fraction of the pool held out for validation.

    Returns:
        Dict with "train_as"/"test_as" (action windows of length `steps`),
        "train_s"/"test_s" (initial states) and "train_es"/"test_es"
        (states `steps` transitions later).
    """
    states = self.data_pool['states']
    actions = self.data_pool['actions']
    n = self.data_pool['counter']
    steps = self.steps
    states = np.array(states).reshape(n, -1)
    actions = np.array(actions).reshape(n, -1)
    # BUG FIX: the original used int(val_ratio * n) as the *training* size,
    # i.e. it trained on 20% of the data and validated on 80%.  Invert so
    # val_ratio is the validation fraction, as its name implies.
    split_idx = int((1 - val_ratio) * n)
    train_states = states[0:split_idx]
    train_actions = actions[0:split_idx]
    test_states = states[split_idx:]
    test_actions = actions[split_idx:]
    train_as = self.convert_ts(train_actions, steps)
    test_as = self.convert_ts(test_actions, steps)
    train_s = np.array(train_states[:-steps])
    test_s = np.array(test_states[:-steps])
    train_es = train_states[steps:, :]
    test_es = test_states[steps:, :]
    # `DEBUG` is only defined when the file runs as a script; default to
    # False so merely importing this module cannot NameError here.
    if globals().get("DEBUG", False):
        print(train_es)
    return {"train_as": train_as,
            "test_as": test_as,
            "train_s": train_s,
            "test_s": test_s,
            "train_es": train_es,
            "test_es": test_es}
@staticmethod
def convert_ts(raw_data, steps):
batchsize = len(raw_data)
return np.array([raw_data[i:i+steps, :] for i in range(batchsize-steps)])
class GRU_Model_Discrete:
    """GRU dynamics model predicting a *discretized* next state.

    Given an initial state and a window of `steps` actions, the network
    outputs a softmax distribution over the cells of a `Grid`
    discretization of the state space.  Training uses TF1-style
    `sess.run` on top of tf.keras layers — NOTE(review): confirm the
    installed TensorFlow version supports this mix.
    """
    def __init__(self,
                 sess,
                 state_dim,
                 action_dim,
                 steps,
                 state_limits,
                 state_num_bins,
                 gru_outshape = 64,
                 pool_maxlen = 10000,
                 *args, **kwargs):
        self.sess = sess
        self.state_dim, self.action_dim, self.steps = state_dim, action_dim, steps
        # Grid maps continuous states to discrete cell indices.
        self.grid = Grid(state_limits, state_num_bins)
        self.discrete_output_size = self.grid.num_disc_state
        self.std_c = 1.  # coefficient for variance loss (unused by the discrete head)
        self.inputs = {}
        self.outputs = {}
        self.other_tensors = {}
        self.build(gru_outshape)
        # Flat name -> tensor lookup across inputs/outputs/auxiliary tensors.
        self.tensors = {}
        self.tensors.update(self.inputs)
        self.tensors.update(self.outputs)
        self.tensors.update(self.other_tensors)
        init = tf.global_variables_initializer()  # NOTE(review): created but not run; caller runs it
        # self.sess.run(init)
        self.is_trained = False
        self.data_pool = {"states": deque(maxlen = pool_maxlen),
                          "actions": deque(maxlen = pool_maxlen),
                          "counter": 0}
        self.pool_maxlen = pool_maxlen

    def build(self, gru_outshape = 16):
        """Build the graph: (init_state, action window) -> cell distribution."""
        actions = tf.keras.Input(shape=(self.steps, self.action_dim), name = "actions_ipt")
        init_state = tf.keras.Input(shape=(self.state_dim,), name = "state_ipt")
        self.inputs["actions"] = actions
        self.inputs["init_state"] = init_state
        # Project the raw state into the GRU hidden size to seed its state.
        cell_tensor = tf.keras.layers.Dense(gru_outshape, name = "transform_state_ipt", activation = "softmax")(init_state)
        y = tf.keras.layers.GRU(gru_outshape, name = "mean_gru")(actions, initial_state = cell_tensor)
        out_dist = tf.keras.layers.Dense(self.discrete_output_size, activation = "softmax", name = "out_dist")(y)
        self.outputs["out_dist"] = out_dist
        # BUG FIX: the original printed the bound method (`self.inputs.values`)
        # rather than the input tensors themselves.
        print(list(self.inputs.values()))
        self.model = tf.keras.Model(inputs = list(self.inputs.values()), outputs = list(self.outputs.values()))
        print(self.model.summary())
        true_y = tf.keras.Input(shape=(self.discrete_output_size, ))
        # MSE between the predicted distribution and the one-hot target.
        dist_loss = tf.losses.mean_squared_error(labels = true_y, predictions=out_dist)
        loss = dist_loss
        updt = tf.train.AdamOptimizer(0.01).minimize(loss)
        self.other_tensors["dist_loss"] = dist_loss
        self.other_tensors["loss"] = loss
        self.other_tensors["true_y"] = true_y
        self.other_tensors["updt"] = updt

    def get_tensors(self, keys):
        """Look up graph tensors by name, preserving the order of `keys`."""
        return [self.tensors[key] for key in keys]

    def train(self, iters = 10000):
        """Run ~`iters` updates in rounds of 10, printing validation loss."""
        data = self.arrange_data()
        n_update = 10
        n_iter = int(iters / n_update)
        for _ in range(n_iter):
            self.do_training(data['train_s'], data['train_as'], data['train_es'], n_update)
            val_loss = self.validate(data["test_s"], data['test_as'], data['test_es'])
            print(f"val_loss: {val_loss}")

    def do_training(self, train_s, train_as, train_es, train_iter):
        """Run `train_iter` optimizer steps on one batch; return last loss."""
        updt, loss, actions, true_y, init_state = self.get_tensors(
            ["updt", 'loss', 'actions', 'true_y', 'init_state'])
        for _ in range(train_iter):
            _, l_ = self.sess.run((updt, loss),
                                  feed_dict={actions: train_as, true_y: train_es, init_state: train_s})
        print(f'Training finished: {train_iter} updates.')
        self.is_trained = True
        return l_

    def validate(self, train_s, train_as, train_es):
        """Evaluate the loss on a held-out batch without updating weights."""
        updt, loss, actions, true_y, init_state = self.get_tensors(
            ["updt", 'loss', 'actions', 'true_y', 'init_state'])
        return self.sess.run(loss,
                             feed_dict={actions: train_as, true_y: train_es, init_state: train_s})

    def predict(self, s, as_):
        """Predict the next state for a single (state, action-window) pair.

        Raises:
            RuntimeError: if called before `train()` has run.
        """
        assert len(s) == 1 and len(as_) == 1
        if not self.is_trained:
            # BUG FIX: was a bare `raise` (opaque "No active exception");
            # same exception type, now with a message.
            raise RuntimeError("predict() called before the model was trained")
        dist_over_states = self.sess.run(self.tensors["out_dist"],
                                         feed_dict={self.tensors["actions"]: as_,
                                                    self.tensors["init_state"]: s})
        return self.grid.get_state_from_dist(dist_over_states[0])

    def run(self, keys, feed_dict):
        """Evaluate arbitrary graph tensors by name."""
        # BUG FIX: the original referenced the bare global `sess`; use the
        # session owned by this instance.
        return self.sess.run([self.tensors[k] for k in keys],
                             feed_dict={self.tensors[k]: feed_dict[k] for k in feed_dict})

    def add_data(self, state, action):
        """Append one (state, action) observation to the replay pool."""
        self.data_pool["states"].append(state)
        self.data_pool["actions"].append(action)
        self.data_pool["counter"] = min(self.data_pool["counter"] + 1, self.pool_maxlen)

    def arrange_data(self, val_ratio = 0.2):
        """Build train/validation arrays; targets are one-hot grid cells."""
        states = self.data_pool['states']
        actions = self.data_pool['actions']
        n = self.data_pool['counter']
        steps = self.steps
        states = np.array(states).reshape(n, -1)
        actions = np.array(actions).reshape(n, -1)
        # BUG FIX: the original trained on val_ratio (20%) of the data and
        # validated on the rest; invert so val_ratio is the held-out share.
        split_idx = int((1 - val_ratio) * n)
        train_states = states[0:split_idx]
        train_actions = actions[0:split_idx]
        test_states = states[split_idx:]
        test_actions = actions[split_idx:]
        train_as = self.convert_ts(train_actions, steps)
        test_as = self.convert_ts(test_actions, steps)
        train_s = np.array(train_states[:-steps])
        test_s = np.array(test_states[:-steps])
        train_es = self.discretize_states(train_states[steps:, :])
        test_es = self.discretize_states(test_states[steps:, :])
        # `DEBUG` exists only when the file runs as a script; default False
        # so importing this module cannot NameError here.
        if globals().get("DEBUG", False):
            print(train_es)
            print(train_es.shape)
        return {"train_as": train_as,
                "test_as": test_as,
                "train_s": train_s,
                "test_s": test_s,
                "train_es": train_es,
                "test_es": test_es}

    def discretize_states(self, state_arr):
        """One-hot encode each continuous state by its grid cell index."""
        n = len(state_arr)
        ret = np.zeros(shape = (n, self.discrete_output_size))
        indices = [self.grid.search(x) for x in state_arr]
        ret[np.arange(n), indices] = 1
        return ret

    @staticmethod
    def convert_ts(raw_data, steps):
        """Stack sliding windows of `steps` rows over the leading axis."""
        batchsize = len(raw_data)
        return np.array([raw_data[i:i + steps, :] for i in range(batchsize - steps)])
if __name__=="__main__":
    DEBUG = True
    import gym
    import os
    # Allow duplicate OpenMP runtimes (common TF/MKL workaround on macOS).
    os.environ['KMP_DUPLICATE_LIB_OK']='True'
    env = gym.make("MountainCar-v0")
    action_space = [0,1,2]  # MountainCar-v0's three discrete actions
    sess = tf.Session()
    # mo = GRU_Model(sess, state_dim = 2, action_dim = 1, steps = 2)
    mo = GRU_Model_Discrete(sess,
                            state_dim = 2,
                            action_dim = 1,
                            steps = 2,
                            state_limits = [[-1.2, 0.6], [-0.5, 0.5]],
                            state_num_bins = [20,5])
    init = tf.global_variables_initializer()
    sess.run(init)
    # populate data
    states = []
    actions = []
    s = env.reset()
    # Collect 1500 random-policy transitions into the model's replay pool.
    for _ in range(1500):
        states.append(s)
        act = np.random.choice(action_space)
        actions.append([act])
        mo.add_data(s, act)
        s, reward, done, _ = env.step(act)
        if done:
            print('done')
            s = env.reset()
    # states = np.array(states)
    # actions = np.array(actions)
    # train_states = states[0:10000]
    # train_actions = actions[0:10000]
    # test_states = states[10000:]
    # test_actions = actions[10000:]
    # def convert_ts(raw_data, steps):
    #     batchsize = len(raw_data)
    #     return np.array([raw_data[i:i+steps, :] for i in range(batchsize-steps)])
    # train_as = convert_ts(train_actions, steps)
    # test_as = convert_ts(test_actions, steps)
    # train_s = np.array(train_states[:-steps])
    # test_s = np.array(test_states[:-steps])
    # train_es = train_states[steps:, :]
    # test_es = test_states[steps:, :]
    mo.train(1000)
    s = states[0]
    acts = actions[0:2]
    # s = np.array(s).reshape(1, -1)
    # acts = np.array(acts).reshape(1, 2, -1)
    print(s, acts)
    # Sample the model several times for the same input to see its spread.
    [print(mo.predict([s], [acts])) for _ in range(5)]
    print(states[2])
    print(mo.grid.bins_mid)
| 35.696078
| 124
| 0.583082
| 1,898
| 14,564
| 4.231296
| 0.10432
| 0.024654
| 0.023907
| 0.018927
| 0.789441
| 0.768771
| 0.764786
| 0.755572
| 0.736521
| 0.720085
| 0
| 0.01189
| 0.289687
| 14,564
| 407
| 125
| 35.783784
| 0.764427
| 0.064749
| 0
| 0.703971
| 0
| 0
| 0.066446
| 0
| 0
| 0
| 0
| 0
| 0.00361
| 1
| 0.083032
| false
| 0
| 0.021661
| 0.01444
| 0.166065
| 0.057762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
52054d33310c0194f97c41b19da52377dfc2698d
| 121
|
py
|
Python
|
app/utils/limiter.py
|
johndatserakis/find-the-state-api
|
81da6c37eaf635ddfc01cb9964d0d173248721c7
|
[
"MIT"
] | 1
|
2021-12-23T15:40:53.000Z
|
2021-12-23T15:40:53.000Z
|
app/utils/limiter.py
|
johndatserakis/find-the-state-api
|
81da6c37eaf635ddfc01cb9964d0d173248721c7
|
[
"MIT"
] | null | null | null |
app/utils/limiter.py
|
johndatserakis/find-the-state-api
|
81da6c37eaf635ddfc01cb9964d0d173248721c7
|
[
"MIT"
] | null | null | null |
from slowapi import Limiter
from slowapi.util import get_remote_address
# Shared request rate limiter; requests are bucketed by client IP address.
limiter = Limiter(key_func=get_remote_address)
| 20.166667
| 46
| 0.85124
| 18
| 121
| 5.444444
| 0.555556
| 0.22449
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107438
| 121
| 5
| 47
| 24.2
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
522f33837383a367d3e08c1282920fb4cdf516be
| 11,306
|
py
|
Python
|
draw.py
|
HJoonKwon/MAVeric
|
39e93942836c3e3b38a4d56566fb118ce809b72f
|
[
"MIT"
] | null | null | null |
draw.py
|
HJoonKwon/MAVeric
|
39e93942836c3e3b38a4d56566fb118ce809b72f
|
[
"MIT"
] | null | null | null |
draw.py
|
HJoonKwon/MAVeric
|
39e93942836c3e3b38a4d56566fb118ce809b72f
|
[
"MIT"
] | null | null | null |
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from numpy import *
import matplotlib.pyplot as plot
def draw_traj(waypoints, trajectory):
    """
    Visualize the trajectories in every dimension by using matplotlib.
    The code is quite repetitive and might be optimized, but it works...

    Coefficient layout per segment `trajectory[i]`, as used below:
    indices 0-4 hold the quartic x(t) coefficients, 5-9 y(t), 10-14 z(t),
    and 15-17 a quadratic yaw(t).  The velocity/acceleration/jerk/snap
    panels plot the analytic derivatives of those polynomials.
    """
    mpl.rcParams['legend.fontsize'] = 10
    # =============================
    # 3D Plot
    # =============================
    ax = plot.subplot2grid((23, 31), (0, 0), colspan=13, rowspan=13, projection='3d') # create Axes3D object, which can plot in 3D
    for i in range(len(trajectory)):
        # ~20 samples per second of segment duration.
        t = linspace(waypoints[i].time, waypoints[i+1].time, int((waypoints[i+1].time-waypoints[i].time)*20))
        x_path = trajectory[i][0] * t ** 4 + trajectory[i][1] * t ** 3 + trajectory[i][2] * t ** 2 + trajectory[i][3] * t + trajectory[i][4]
        y_path = trajectory[i][5] * t ** 4 + trajectory[i][6] * t ** 3 + trajectory[i][7] * t ** 2 + trajectory[i][8] * t + trajectory[i][9]
        z_path = trajectory[i][10] * t ** 4 + trajectory[i][11] * t ** 3 + trajectory[i][12] * t ** 2 + trajectory[i][13] * t + trajectory[i][14]
        ax.plot(x_path, y_path, z_path, label='[%d] to [%d]' %(i, i+1)) # plot trajectory
        ax.plot([waypoints[i+1].x], [waypoints[i+1].y], [waypoints[i+1].z],'ro') # plot start
        if i == 0:
            ax.plot([waypoints[i].x], [waypoints[i].y], [waypoints[i].z], 'ro') # plot end
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.legend()
    # =============================
    # Position Plots
    # =============================
    # add 2D plot of X over time
    ax = plot.subplot2grid((23, 31), (13, 0), colspan = 6, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i+1].time, int((waypoints[i+1].time-waypoints[i].time)*20))
        x_path = trajectory[i][0] * t ** 4 + trajectory[i][1] * t ** 3 + trajectory[i][2] * t ** 2 + trajectory[i][3] * t + trajectory[i][4]
        ax.plot(t, x_path)
    ax.set_ylabel('X')
    # add 2D plot of Y over time
    ax = plot.subplot2grid((23, 31), (19, 0), colspan = 6, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i+1].time, int((waypoints[i+1].time-waypoints[i].time)*20))
        y_path = trajectory[i][5] * t ** 4 + trajectory[i][6] * t ** 3 + trajectory[i][7] * t ** 2 + trajectory[i][8] * t + trajectory[i][9]
        ax.plot(t, y_path)
    ax.set_ylabel('Y')
    ax.set_xlabel('Time')
    # add 2D plot of Z over time
    ax = plot.subplot2grid((23, 31), (13, 7), colspan = 6, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i+1].time, int((waypoints[i+1].time-waypoints[i].time)*20))
        z_path = trajectory[i][10] * t ** 4 + trajectory[i][11] * t ** 3 + trajectory[i][12] * t ** 2 + trajectory[i][13] * t + trajectory[i][14]
        ax.plot(t, z_path)
    ax.set_ylabel('Z')
    # add 2D plot of Yaw over time
    ax = plot.subplot2grid((23, 31), (19, 7), colspan = 6, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i+1].time, int((waypoints[i+1].time-waypoints[i].time)*20))
        yaw_path = trajectory[i][15] * t ** 2 + trajectory[i][16] * t + trajectory[i][17]
        ax.plot(t, yaw_path)
    ax.set_ylabel('Yaw')
    ax.set_xlabel('Time')
    # =============================
    # Velocity Plots (first derivative of the position polynomials)
    # =============================
    # add 2D plot of X over time
    ax = plot.subplot2grid((23, 31), (0, 15), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        x_path = 4 * trajectory[i][0] * t ** 3 + 3 * trajectory[i][1] * t ** 2 + 2 * trajectory[i][2] * t + trajectory[i][3]
        ax.plot(t, x_path)
    ax.set_ylabel('X')
    # add 2D plot of Y over time
    ax = plot.subplot2grid((23, 31), (6, 15), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        y_path = 4 * trajectory[i][5] * t ** 3 + 3 * trajectory[i][6] * t ** 2 + 2* trajectory[i][7] * t + trajectory[i][8]
        ax.plot(t, y_path)
    ax.set_ylabel('Y')
    ax.set_xlabel('Time')
    # add 2D plot of Z over time
    ax = plot.subplot2grid((23, 31), (0, 19), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        z_path = 4 * trajectory[i][10] * t ** 3 + 3 * trajectory[i][11] * t ** 2 + 2 * trajectory[i][12] * t + trajectory[i][13]
        ax.plot(t, z_path)
    ax.set_ylabel('Z')
    # add 2D plot of Yaw over time
    ax = plot.subplot2grid((23, 31), (6, 19), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        yaw_path = 2 * trajectory[i][15] * t + trajectory[i][16]
        ax.plot(t, yaw_path)
    ax.set_ylabel('Yaw')
    ax.set_xlabel('Time')
    # =============================
    # Acceleration Plots (second derivative)
    # =============================
    # add 2D plot of X over time
    ax = plot.subplot2grid((23, 31), (13, 15), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        x_path = 12 * trajectory[i][0] * t ** 2 + 6 * trajectory[i][1] * t + 2 * trajectory[i][2]
        ax.plot(t, x_path)
    ax.set_ylabel('X')
    # add 2D plot of Y over time
    ax = plot.subplot2grid((23, 31), (19, 15), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        y_path = 12 * trajectory[i][5] * t ** 2 + 6 * trajectory[i][6] * t + 2 * trajectory[i][7]
        ax.plot(t, y_path)
    ax.set_ylabel('Y')
    ax.set_xlabel('Time')
    # add 2D plot of Z over time
    ax = plot.subplot2grid((23, 31), (13, 19), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        z_path = 12 * trajectory[i][10] * t ** 2 + 6 * trajectory[i][11] * t + 2 * trajectory[i][12]
        ax.plot(t, z_path)
    ax.set_ylabel('Z')
    # add 2D plot of Yaw over time
    ax = plot.subplot2grid((23, 31), (19, 19), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        # yaw acceleration is constant; oneVec broadcasts it over the samples
        oneVec = linspace(1,1, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        yaw_path = 2 * trajectory[i][15] * oneVec
        ax.plot(t, yaw_path)
    ax.set_ylabel('Yaw')
    ax.set_xlabel('Time')
    # =============================
    # Jerk Plots (third derivative)
    # =============================
    # add 2D plot of X over time
    ax = plot.subplot2grid((23, 31), (0, 24), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        x_path = 24 * trajectory[i][0] * t + 6 * trajectory[i][1]
        ax.plot(t, x_path)
    ax.set_ylabel('X')
    # add 2D plot of Y over time
    ax = plot.subplot2grid((23, 31), (6, 24), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        y_path = 24 * trajectory[i][5] * t + 6 * trajectory[i][6]
        ax.plot(t, y_path)
    ax.set_ylabel('Y')
    ax.set_xlabel('Time')
    # add 2D plot of Z over time
    ax = plot.subplot2grid((23, 31), (0, 28), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        z_path = 24 * trajectory[i][10] * t + 6 * trajectory[i][11]
        ax.plot(t, z_path)
    ax.set_ylabel('Z')
    ax.set_xlabel('Time')
    # =============================
    # Snap Plots (fourth derivative; constant for a quartic)
    # =============================
    # add 2D plot of X over time
    ax = plot.subplot2grid((23, 31), (13, 24), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        oneVec = linspace(1,1, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        x_path = 24 *trajectory[i][0] * oneVec
        ax.plot(t, x_path)
    ax.set_ylabel('X')
    # add 2D plot of Y over time
    ax = plot.subplot2grid((23, 31), (19, 24), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        oneVec = linspace(1,1, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        y_path = 24 * trajectory[i][5] * oneVec
        ax.plot(t, y_path)
    ax.set_ylabel('Y')
    ax.set_xlabel('Time')
    # add 2D plot of Z over time
    ax = plot.subplot2grid((23, 31), (13, 28), colspan=3, rowspan=4)
    for i in range(len(trajectory)):
        t = linspace(waypoints[i].time, waypoints[i + 1].time, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        oneVec = linspace(1,1, int((waypoints[i + 1].time - waypoints[i].time) * 20))
        z_path = 24 * trajectory[i][10] * oneVec
        ax.plot(t, z_path)
    ax.set_ylabel('Z')
    ax.set_xlabel('Time')
    # =============================
    # Labels (invisible axes used purely as text anchors between panels)
    # =============================
    ax = plot.subplot2grid((23, 31), (18, 6))
    ax.set_frame_on(False)
    ax.axis('off')
    ax.text(-0.3,0.7,"Position", fontweight='bold')
    ax = plot.subplot2grid((23, 31), (5, 18))
    ax.set_frame_on(False)
    ax.axis('off')
    ax.text(-0.3,0.7,"Velocity", fontweight='bold')
    ax = plot.subplot2grid((23, 31), (18, 18))
    ax.set_frame_on(False)
    ax.axis('off')
    ax.text(-0.7,0.7,"Acceleration", fontweight='bold')
    ax = plot.subplot2grid((23, 31), (5, 27))
    ax.set_frame_on(False)
    ax.axis('off')
    ax.text(0,0.7,"Jerk", fontweight='bold')
    ax = plot.subplot2grid((23, 31), (18, 27))
    ax.set_frame_on(False)
    ax.axis('off')
    ax.text(-0.1,0.7,"Snap", fontweight='bold')
    #plot.figtext(0, 0, 'Planned Trajectory:\n '
    #             '(X,Y,Z,Yaw,X_dot,Y_dot,Z_dot)\n '
    #             'Start: (%0.2f, %0.2f, %0.2f, %0.2f, %0.2f,%0.2f, %0.2f)\n '
    #             'End: (%0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f) \n'
    #             'Time for segment: %0.2f'
    #             % (waypoint0.x, waypoint0.y, waypoint0.z, waypoint0.yaw, waypoint0.x_dot, waypoint0.y_dot,
    #                waypoint0.z_dot,
    #                waypoint1.x, waypoint1.y, waypoint1.z, waypoint1.yaw, waypoint1.x_dot, waypoint1.y_dot,
    #                waypoint1.z_dot,
    #                waypoint1.time - waypoint0.time))
    # print to screen
    plot.show()
| 45.405622
| 145
| 0.552715
| 1,745
| 11,306
| 3.527221
| 0.073926
| 0.146223
| 0.080422
| 0.102356
| 0.808123
| 0.785378
| 0.77498
| 0.77498
| 0.750447
| 0.750447
| 0
| 0.067976
| 0.229701
| 11,306
| 248
| 146
| 45.58871
| 0.638765
| 0.163276
| 0
| 0.644172
| 0
| 0
| 0.018217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006135
| false
| 0
| 0.02454
| 0
| 0.030675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
526a538cc535f29c049f7c12811d92feb33a8a42
| 46
|
py
|
Python
|
mypackage/__init__.py
|
meinert/pythonprojecttemplate
|
c368b2d3de2f64afdcf9eb79c9d982d9b037c711
|
[
"BSD-2-Clause"
] | null | null | null |
mypackage/__init__.py
|
meinert/pythonprojecttemplate
|
c368b2d3de2f64afdcf9eb79c9d982d9b037c711
|
[
"BSD-2-Clause"
] | null | null | null |
mypackage/__init__.py
|
meinert/pythonprojecttemplate
|
c368b2d3de2f64afdcf9eb79c9d982d9b037c711
|
[
"BSD-2-Clause"
] | null | null | null |
from .core import hmm
from .mymodule import *
| 15.333333
| 23
| 0.76087
| 7
| 46
| 5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 24
| 23
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5285bbbd49479419e172b9098c929f6b6f6ca4d6
| 263
|
py
|
Python
|
src/openpersonen/api/views/ouder.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | 2
|
2020-08-26T11:24:43.000Z
|
2021-07-28T09:46:40.000Z
|
src/openpersonen/api/views/ouder.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | 153
|
2020-08-26T10:45:35.000Z
|
2021-12-10T17:33:16.000Z
|
src/openpersonen/api/views/ouder.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | null | null | null |
from openpersonen.api.data_classes import Ouder
from openpersonen.api.serializers import OuderSerializer
from openpersonen.api.views.base import NestedViewSet
class OuderViewSet(NestedViewSet):
    """Nested viewset exposing Ouder records.

    NOTE(review): all request handling comes from NestedViewSet; this class
    only binds the serializer and the backing data class.
    """
    serializer_class = OuderSerializer  # serializer used for responses
    instance_class = Ouder  # data class instantiated for each record
| 26.3
| 56
| 0.836502
| 29
| 263
| 7.482759
| 0.551724
| 0.221198
| 0.262673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117871
| 263
| 9
| 57
| 29.222222
| 0.935345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bfef3fcd14a6ddbd71f0e0ad7e444538abea6494
| 41
|
py
|
Python
|
contribuicao/arquivo_novo.py
|
lucasleonardobs/cool-repo
|
96b2329de8151ba16db4e742e363248fc9a6820c
|
[
"MIT"
] | null | null | null |
contribuicao/arquivo_novo.py
|
lucasleonardobs/cool-repo
|
96b2329de8151ba16db4e742e363248fc9a6820c
|
[
"MIT"
] | null | null | null |
contribuicao/arquivo_novo.py
|
lucasleonardobs/cool-repo
|
96b2329de8151ba16db4e742e363248fc9a6820c
|
[
"MIT"
] | null | null | null |
# Prints a contribution message (Portuguese: "This here is my contribution!").
print("Essa aqui é minha contribuição!")
| 20.5
| 40
| 0.756098
| 6
| 41
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 1
| 41
| 41
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.756098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
872c374b3a476b8af183a616257568628340ad0c
| 734
|
py
|
Python
|
src/finitestate/firmware/package_metadata/model.py
|
FiniteStateInc/clearcode-toolkit
|
521c3a2ab9d9fa6d7b9059227c6af9d09b031c33
|
[
"Apache-2.0"
] | null | null | null |
src/finitestate/firmware/package_metadata/model.py
|
FiniteStateInc/clearcode-toolkit
|
521c3a2ab9d9fa6d7b9059227c6af9d09b031c33
|
[
"Apache-2.0"
] | null | null | null |
src/finitestate/firmware/package_metadata/model.py
|
FiniteStateInc/clearcode-toolkit
|
521c3a2ab9d9fa6d7b9059227c6af9d09b031c33
|
[
"Apache-2.0"
] | 1
|
2020-12-22T16:51:40.000Z
|
2020-12-22T16:51:40.000Z
|
import attr
@attr.s
class FSPackageMetadata():
    """Attrs-based record describing a software package's metadata.

    All fields are keyword-only strings, so instances must be built with
    explicit keyword arguments.
    """
    id: str = attr.ib(kw_only=True)
    name: str = attr.ib(kw_only=True)
    version: str = attr.ib(kw_only=True)
    release: str = attr.ib(kw_only=True)
    supplier_name: str = attr.ib(kw_only=True)
    supplier_type: str = attr.ib(kw_only=True)
    supplier_url: str = attr.ib(kw_only=True)
    source_information: str = attr.ib(kw_only=True)
    # BUG FIX: `file_name` was declared twice; attrs keeps only the later
    # declaration, so this single definition (at the later position)
    # preserves the effective field order and behavior.
    file_name: str = attr.ib(kw_only=True)
    download_location: str = attr.ib(kw_only=True)
    home_page: str = attr.ib(kw_only=True)
    declared_license: str = attr.ib(kw_only=True)
    summary_description: str = attr.ib(kw_only=True)
    detailed_description: str = attr.ib(kw_only=True)
| 34.952381
| 53
| 0.697548
| 122
| 734
| 3.983607
| 0.237705
| 0.216049
| 0.277778
| 0.339506
| 0.730453
| 0.730453
| 0.495885
| 0.269547
| 0.1893
| 0.1893
| 0
| 0
| 0.173025
| 734
| 20
| 54
| 36.7
| 0.800659
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.055556
| 0
| 0.944444
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
87754db7cd6e79fb73ed0a1b2c5380838fdee166
| 3,640
|
py
|
Python
|
dbdaora/boolean/_tests/datastore/test_integration_service_boolean_aioredis_datastore_get_one.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | 21
|
2019-10-14T14:33:33.000Z
|
2022-02-11T04:43:07.000Z
|
dbdaora/boolean/_tests/datastore/test_integration_service_boolean_aioredis_datastore_get_one.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | null | null | null |
dbdaora/boolean/_tests/datastore/test_integration_service_boolean_aioredis_datastore_get_one.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | 1
|
2019-09-29T23:51:44.000Z
|
2019-09-29T23:51:44.000Z
|
import asynctest
import pytest
from aioredis import RedisError
@pytest.mark.asyncio
async def test_should_get_one(
    fake_service, serialized_fake_entity, fake_entity
):
    # Arrange: seed the serialized entity in the memory data source.
    await fake_service.repository.memory_data_source.set(
        'fake:other_fake:fake', serialized_fake_entity,
    )
    # Act + assert: get_one resolves the entity's id from memory.
    entity = await fake_service.get_one('fake', other_id='other_fake')
    assert entity == fake_entity.id
@pytest.mark.asyncio
async def test_should_get_one_with_fields(
    fake_service, serialized_fake_entity, fake_entity
):
    await fake_service.repository.memory_data_source.set(
        'fake:other_fake:fake', serialized_fake_entity,
    )
    # Fields not in the requested projection are expected to be unset.
    fake_entity.number = None
    fake_entity.boolean = False
    entity = await fake_service.get_one(
        'fake',
        fields=['id', 'other_id', 'integer', 'inner_entities'],
        other_id='other_fake',
    )
    assert entity == fake_entity.id
@pytest.mark.asyncio
async def test_should_get_one_from_cache(
    fake_service, serialized_fake_entity, fake_entity
):
    # Replace the memory getter with a mock so any call to it is detectable.
    fake_service.repository.memory_data_source.get = asynctest.CoroutineMock()
    fake_service.cache['fakeother_idother_fake'] = fake_entity.id
    entity = await fake_service.get_one('fake', other_id='other_fake')
    assert entity == fake_entity.id
    # A cache hit must bypass the memory data source entirely.
    assert not fake_service.repository.memory_data_source.get.called
@pytest.mark.asyncio
async def test_should_get_one_from_fallback_when_not_found_on_memory(
    fake_service, serialized_fake_entity, fake_entity
):
    # Ensure both the value and the not-found marker are absent from memory.
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:not-found:other_fake:fake'
    )
    # Seed only the fallback data source.
    await fake_service.repository.fallback_data_source.put(
        fake_service.repository.fallback_data_source.make_key(
            'fake', 'other_fake:fake'
        ),
        {'value': True},
    )
    entity = await fake_service.get_one('fake', other_id='other_fake')
    assert entity == fake_entity.id
    # The fallback hit must be written back into memory (serialized as b'1').
    assert (
        await fake_service.repository.memory_data_source.get(
            'fake:other_fake:fake'
        )
        == b'1'
    )
@pytest.mark.asyncio
async def test_should_get_one_from_fallback_when_not_found_on_memory_with_fields(
    fake_service, serialized_fake_entity, fake_entity
):
    # Remove the entity from memory so the fallback path is exercised.
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake'
    )
    await fake_service.repository.fallback_data_source.put(
        fake_service.repository.fallback_data_source.make_key(
            'fake', 'other_fake:fake'
        ),
        {'value': True},
    )
    # Fields not in the requested projection are expected to be unset.
    fake_entity.number = None
    fake_entity.boolean = False
    entity = await fake_service.get_one(
        'fake',
        other_id='other_fake',
        fields=['id', 'other_id', 'integer', 'inner_entities'],
    )
    assert entity == fake_entity.id
    # The fallback hit must be written back into memory (serialized as b'1').
    assert (
        await fake_service.repository.memory_data_source.get(
            'fake:other_fake:fake'
        )
        == b'1'
    )
@pytest.mark.asyncio
async def test_should_get_one_from_fallback_after_open_circuit_breaker(
    fake_service, fake_entity, mocker
):
    # Make every memory read raise, simulating an unavailable Redis so the
    # circuit breaker opens and reads go straight to the fallback.
    fake_service.repository.memory_data_source.get = asynctest.CoroutineMock(
        side_effect=RedisError
    )
    key = fake_service.repository.fallback_data_source.make_key(
        'fake', 'other_fake', 'fake'
    )
    await fake_service.repository.fallback_data_source.put(
        key, {'value': True}
    )
    entity = await fake_service.get_one('fake', other_id='other_fake')
    assert entity == fake_entity.id
    # The degraded path is expected to log exactly one warning.
    assert fake_service.logger.warning.call_count == 1
| 28.4375
| 81
| 0.708791
| 468
| 3,640
| 5.136752
| 0.138889
| 0.137271
| 0.106489
| 0.091514
| 0.902246
| 0.902246
| 0.902246
| 0.868552
| 0.839434
| 0.759983
| 0
| 0.001024
| 0.19533
| 3,640
| 127
| 82
| 28.661417
| 0.819734
| 0
| 0
| 0.650485
| 0
| 0
| 0.107418
| 0.014286
| 0
| 0
| 0
| 0
| 0.097087
| 1
| 0
| false
| 0
| 0.029126
| 0
| 0.029126
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5e6d4804b9a1a61a343ba8ce1cca0c1bfe09a8ce
| 167
|
py
|
Python
|
wideq/__init__.py
|
stboch/wideq
|
4acde696e958e00eede87a0e1fe28f490148204f
|
[
"MIT"
] | null | null | null |
wideq/__init__.py
|
stboch/wideq
|
4acde696e958e00eede87a0e1fe28f490148204f
|
[
"MIT"
] | null | null | null |
wideq/__init__.py
|
stboch/wideq
|
4acde696e958e00eede87a0e1fe28f490148204f
|
[
"MIT"
] | null | null | null |
"""Reverse-engineered client for the LG SmartThinQ API.
"""
from .core import * # noqa
from .client import * # noqa
from .ac import * # noqa
# Package version string (keep in sync with release/setup metadata).
__version__ = '1.1.1'
| 20.875
| 55
| 0.670659
| 24
| 167
| 4.5
| 0.625
| 0.277778
| 0.259259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022388
| 0.197605
| 167
| 7
| 56
| 23.857143
| 0.783582
| 0.407186
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0d727e672d17895ece977f4d3042f95ae8722662
| 36
|
py
|
Python
|
rootnum/__init__.py
|
gXLg/rootnum
|
a617e30475a12ed6c23f9a1b35f54326824e6d7a
|
[
"MIT"
] | 1
|
2021-10-04T10:37:31.000Z
|
2021-10-04T10:37:31.000Z
|
rootnum/__init__.py
|
gXLg/Rootnum
|
a617e30475a12ed6c23f9a1b35f54326824e6d7a
|
[
"MIT"
] | null | null | null |
rootnum/__init__.py
|
gXLg/Rootnum
|
a617e30475a12ed6c23f9a1b35f54326824e6d7a
|
[
"MIT"
] | null | null | null |
from rootnum.rootnum import Rootnum
| 18
| 35
| 0.861111
| 5
| 36
| 6.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0d9d247c1cd1987d88314c1225b8f5691cf72a63
| 10,530
|
py
|
Python
|
api/queries/end_user_advisories/tests/test_create_end_user_advisories.py
|
uktrade/lite-ap
|
4e1a57956bd921992b4a6e2b8fbacbba5720960d
|
[
"MIT"
] | 3
|
2019-05-15T09:30:39.000Z
|
2020-04-22T16:14:23.000Z
|
api/queries/end_user_advisories/tests/test_create_end_user_advisories.py
|
uktrade/lite-ap
|
4e1a57956bd921992b4a6e2b8fbacbba5720960d
|
[
"MIT"
] | 85
|
2019-04-24T10:39:35.000Z
|
2022-03-21T14:52:12.000Z
|
api/queries/end_user_advisories/tests/test_create_end_user_advisories.py
|
uktrade/lite-ap
|
4e1a57956bd921992b4a6e2b8fbacbba5720960d
|
[
"MIT"
] | 1
|
2021-01-17T11:12:19.000Z
|
2021-01-17T11:12:19.000Z
|
from django.urls import reverse
from parameterized import parameterized
from rest_framework import status
from api.cases.models import Case
from api.parties.enums import PartyType
from test_helpers.clients import DataTestClient
class EndUserAdvisoryCreateTests(DataTestClient):
url = reverse("queries:end_user_advisories:end_user_advisories")
def test_create_end_user_advisory_query(self):
"""
Ensure that a user can create an end user advisory, and that it creates a case
when doing so
"""
data = {
"end_user": {
"sub_type": "government",
"name": "Ada",
"website": "https://gov.uk",
"address": "123",
"signatory_name_euu": "Ada",
"country": "GB",
"type": PartyType.END_USER,
},
"note": "I Am Easy to Find",
"reasoning": "Lack of hairpin turns",
"nature_of_business": "guns",
"contact_name": "Steven",
"contact_email": "steven@gov.com",
"contact_job_title": "director",
"contact_telephone": "0123456789",
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()["end_user_advisory"]
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response_data["note"], data["note"])
self.assertEqual(response_data["reasoning"], data["reasoning"])
self.assertEqual(response_data["contact_email"], data["contact_email"])
self.assertEqual(response_data["contact_telephone"], data["contact_telephone"])
self.assertEqual(response_data["contact_job_title"], data["contact_job_title"])
end_user_data = response_data["end_user"]
self.assertEqual(end_user_data["sub_type"]["key"], data["end_user"]["sub_type"])
self.assertEqual(end_user_data["name"], data["end_user"]["name"])
self.assertEqual(end_user_data["website"], data["end_user"]["website"])
self.assertEqual(end_user_data["address"], data["end_user"]["address"])
self.assertEqual(end_user_data["country"]["id"], data["end_user"]["country"])
self.assertEqual(Case.objects.count(), 1)
self.assertEqual(Case.objects.get().submitted_by, self.exporter_user)
def test_create_copied_end_user_advisory_query(self):
"""
Ensure that a user can duplicate an end user advisory, it links to the previous
query and that it creates a case when doing so
"""
query = self.create_end_user_advisory("Advisory", "", self.organisation)
data = {
"end_user": {
"sub_type": "government",
"name": "Ada",
"website": "https://gov.uk",
"address": "123",
"signatory_name_euu": "Ada",
"country": "GB",
"type": PartyType.END_USER,
},
"note": "I Am Easy to Find",
"reasoning": "Lack of hairpin turns",
"copy_of": query.id,
"nature_of_business": "guns",
"contact_name": "Steven",
"contact_email": "steven@gov.com",
"contact_job_title": "director",
"contact_telephone": "0123456789",
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()["end_user_advisory"]
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response_data["note"], data["note"])
self.assertEqual(response_data["reasoning"], data["reasoning"])
self.assertEqual(response_data["copy_of"], str(data["copy_of"]))
end_user_data = response_data["end_user"]
self.assertEqual(end_user_data["sub_type"]["key"], data["end_user"]["sub_type"])
self.assertEqual(end_user_data["name"], data["end_user"]["name"])
self.assertEqual(end_user_data["website"], data["end_user"]["website"])
self.assertEqual(end_user_data["address"], data["end_user"]["address"])
self.assertEqual(end_user_data["country"]["id"], data["end_user"]["country"])
self.assertEqual(Case.objects.count(), 2)
@parameterized.expand(
[
("com", "person", "http://gov.co.uk", "place street", "GB", "", "",), # invalid end user type
("commercial", "", "", "nowhere", "GB", "", ""), # name is empty
("government", "abc", "abc", "nowhere", "GB", "", "",), # invalid web address
("government", "abc", "", "", "GB", "", ""), # empty address
("government", "abc", "", "nowhere", "ALP", "", ""), # invalid country code
("", "", "", "", "", "", ""), # empty dataset
]
)
def test_create_end_user_advisory_query_failure(
self, end_user_type, name, website, address, country, note, reasoning
):
data = {
"end_user": {
"type": end_user_type,
"name": name,
"website": website,
"address": address,
"country": country,
},
"note": note,
"reasoning": reasoning,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_end_user_advisory_query_for_organisation_failure(self):
"""
Fail to create organisation advisory with missing fields
"""
data = {
"end_user": {
"sub_type": "commercial",
"name": "Ada",
"website": "https://gov.uk",
"address": "123",
"signatory_name_euu": "Ada",
"country": "GB",
"type": PartyType.END_USER,
},
"note": "I Am Easy to Find",
"reasoning": "Lack of hairpin turns",
"contact_email": "steven@gov.com",
"contact_telephone": "0123456789",
"nature_of_business": "",
"contact_name": "",
"contact_job_title": "",
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
errors = response.json()["errors"]
self.assertEqual(errors.get("nature_of_business"), ["This field may not be blank"])
self.assertEqual(errors.get("contact_name"), ["This field may not be blank"])
self.assertEqual(errors.get("contact_job_title"), ["This field may not be blank"])
def test_create_end_user_advisory_query_for_government_failure(self):
"""
Fail to create gov advisory with missing fields
"""
data = {
"end_user": {
"sub_type": "commercial",
"name": "Ada",
"website": "https://gov.uk",
"address": "123",
"signatory_name_euu": "Ada",
"country": "GB",
"type": PartyType.END_USER,
},
"note": "I Am Easy to Find",
"reasoning": "Lack of hairpin turns",
"contact_email": "steven@gov.com",
"contact_telephone": "0123456789",
"contact_name": "",
"contact_job_title": "",
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
errors = response.json()["errors"]
self.assertEqual(errors.get("contact_name"), ["This field may not be blank"])
self.assertEqual(errors.get("contact_job_title"), ["This field may not be blank"])
def test_create_end_user_advisory_query_for_government(self):
"""
Successfully creates gov advisory
"""
data = {
"end_user": {
"sub_type": "government",
"name": "Ada",
"website": "https://gov.uk",
"address": "123",
"signatory_name_euu": "Ada",
"country": "GB",
"type": PartyType.END_USER,
},
"note": "I Am Easy to Find",
"reasoning": "Lack of hairpin turns",
"contact_email": "steven@gov.com",
"contact_telephone": "0123456789",
"contact_name": "steven",
"contact_job_title": "director",
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_end_user_advisory_query_for_individual(self):
"""
Successfully create individual advisory
"""
data = {
"end_user": {
"sub_type": "individual",
"name": "Ada",
"website": "https://gov.uk",
"address": "123",
"signatory_name_euu": "Ada",
"country": "GB",
"type": PartyType.END_USER,
},
"note": "I Am Easy to Find",
"reasoning": "Lack of hairpin turns",
"contact_email": "steven@gov.com",
"contact_telephone": "0123456789",
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()["end_user_advisory"]
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response_data["note"], data["note"])
self.assertEqual(response_data["reasoning"], data["reasoning"])
self.assertEqual(response_data["contact_email"], data["contact_email"])
self.assertEqual(response_data["contact_telephone"], data["contact_telephone"])
end_user_data = response_data["end_user"]
self.assertEqual(end_user_data["sub_type"]["key"], data["end_user"]["sub_type"])
self.assertEqual(end_user_data["name"], data["end_user"]["name"])
self.assertEqual(end_user_data["website"], data["end_user"]["website"])
self.assertEqual(end_user_data["signatory_name_euu"], data["end_user"]["signatory_name_euu"])
self.assertEqual(end_user_data["address"], data["end_user"]["address"])
self.assertEqual(end_user_data["country"]["id"], data["end_user"]["country"])
self.assertEqual(Case.objects.count(), 1)
| 41.952191
| 106
| 0.570845
| 1,116
| 10,530
| 5.143369
| 0.12724
| 0.084146
| 0.049826
| 0.061324
| 0.8
| 0.786063
| 0.780139
| 0.772125
| 0.759582
| 0.74669
| 0
| 0.013492
| 0.282051
| 10,530
| 250
| 107
| 42.12
| 0.745767
| 0.047863
| 0
| 0.707921
| 0
| 0
| 0.26566
| 0.004764
| 0
| 0
| 0
| 0
| 0.217822
| 1
| 0.034653
| false
| 0
| 0.029703
| 0
| 0.074257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0da098f3ce6e533bfaba3eb347a8abb5f40ab89a
| 152
|
py
|
Python
|
meiduo_mall/meiduo_mall/utils/views.py
|
1103928458/meiduo_drf
|
49595755f264b09ea748b4deb8a88bba5eb8557b
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/utils/views.py
|
1103928458/meiduo_drf
|
49595755f264b09ea748b4deb8a88bba5eb8557b
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/utils/views.py
|
1103928458/meiduo_drf
|
49595755f264b09ea748b4deb8a88bba5eb8557b
|
[
"MIT"
] | 1
|
2020-11-10T07:22:42.000Z
|
2020-11-10T07:22:42.000Z
|
from django.contrib.auth import mixins
from django.views import View
# 判断用户登录----用于继承
class LoginRequiredView(mixins.LoginRequiredMixin,View):
pass
| 25.333333
| 56
| 0.802632
| 19
| 152
| 6.421053
| 0.736842
| 0.163934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111842
| 152
| 6
| 57
| 25.333333
| 0.903704
| 0.092105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0da45945f19b31a65ab305f970191ced807cecf8
| 173
|
py
|
Python
|
ciclo1_python/udea/MisionTIC_UdeA_Ciclo1/Material/Semana_7/Semana 7/Mifuncion.py
|
felipeescallon/mision_tic_2022
|
20496fc40b18d2e98114d6362928f34fde41aaae
|
[
"CC0-1.0"
] | 7
|
2021-07-05T21:25:50.000Z
|
2021-11-09T11:09:41.000Z
|
ciclo1_python/udea/MisionTIC_UdeA_Ciclo1/Material/Semana_7/Semana 7/Mifuncion.py
|
felipeescallon/mision_tic_2022
|
20496fc40b18d2e98114d6362928f34fde41aaae
|
[
"CC0-1.0"
] | null | null | null |
ciclo1_python/udea/MisionTIC_UdeA_Ciclo1/Material/Semana_7/Semana 7/Mifuncion.py
|
felipeescallon/mision_tic_2022
|
20496fc40b18d2e98114d6362928f34fde41aaae
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Ing. Víctor Fabián Castro Pérez
"""
def imprimealgo():
print ("Esta es la cadena que se imprime de la funcion MiFuncion\n")
| 21.625
| 73
| 0.624277
| 24
| 173
| 4.5
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.225434
| 173
| 7
| 74
| 24.714286
| 0.798507
| 0.364162
| 0
| 0
| 0
| 0
| 0.617021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0df1104fc90b32d9e41bb331b088353d2b4dcb77
| 48
|
py
|
Python
|
allink_core/core/customisation/dummy_new_app/tests/test_views.py
|
allink/allink-core
|
cf2727f26192d8dee89d76feb262bc4760f36f5e
|
[
"BSD-3-Clause"
] | 5
|
2017-03-13T08:49:45.000Z
|
2022-03-05T20:05:56.000Z
|
allink_core/core/customisation/dummy_new_app/tests/test_views.py
|
allink/allink-core
|
cf2727f26192d8dee89d76feb262bc4760f36f5e
|
[
"BSD-3-Clause"
] | 28
|
2019-10-21T08:32:18.000Z
|
2022-02-10T13:16:38.000Z
|
allink_core/core/customisation/dummy_new_app/tests/test_views.py
|
allink/allink-core
|
cf2727f26192d8dee89d76feb262bc4760f36f5e
|
[
"BSD-3-Clause"
] | null | null | null |
# TODO add your tests here or delete this file.
| 24
| 47
| 0.75
| 9
| 48
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 48
| 1
| 48
| 48
| 0.947368
| 0.9375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0dfa8d034fee3127c14bc4cec6c81ca4e9d89283
| 1,753
|
py
|
Python
|
user/vistas/widgets/chatBox.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/chatBox.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/chatBox.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
print '''<div class="chat '''+str('hidden' if 'chat' in data['hidden'] else '' )+'''"> <style> input,button{ font-size:13px; } .chat{ width:200px; font-size:12px !important; } .chat textarea{ width:100%; } .chat .mensajes{ height:240px; } .conversador1{ text-align:left; } .conversador1 p{ background-color:rgb(200,200,250); border-radius:30px 0px 30px 30px; padding:8px; display:inline-block; margin:0px; } .yo{ text-align:right; } .yo p{ background-color:rgb(150,200,250); border-radius:30px 30px 30px 0px; padding:8px; display:inline-block; margin:0px; } .btn-closeChat{ cursor:pointer; padding:5px; } .chatbox{ min-height:30px; }</style> <div class="bg-ubuntu_blue pad-05"> <span class="titulo">Usuario 1</span> <div class="right"> <img src="'''+str(config.base_url)+'''static/imgs/iconos/005-add.png" class="height-1_5"> <img src="'''+str(config.base_url)+'''static/imgs/iconos/004-settings.png" class="height-1_5"> <span class="btn-closeChat">x</span></div> </div> <div style="overflow-y:scroll" class="bg-white mensajes"> <div> '''
if "conversacion" in data:
print ''' '''
for elem in data["conversacion"]:
print ''' '''
if elem[0]=="conversador1":
print ''' <div class="conversador1"><p>'''+str(elem[1]) +'''</p></div> '''
elif elem[0]=="yo":
print ''' <div class="yo"><p >'''+str(elem[1]) +'''</p></div> '''
pass
print ''' '''
pass
print ''' '''
pass
print ''' </div> </div> <textarea class="chatbox"> hola </textarea></div>'''
| 103.117647
| 1,194
| 0.551626
| 219
| 1,753
| 4.392694
| 0.438356
| 0.04158
| 0.040541
| 0.039501
| 0.261954
| 0.182952
| 0.155925
| 0.079002
| 0.079002
| 0
| 0
| 0.055265
| 0.236167
| 1,753
| 17
| 1,195
| 103.117647
| 0.663181
| 0.021677
| 0
| 0.466667
| 0
| 0.2
| 0.773629
| 0.177946
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.2
| 0.066667
| null | null | 0.533333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
218b2b286fc80001c86b7e06dcf171dfa0a36985
| 68
|
py
|
Python
|
recipes-tag/pyzbar/run_test.py
|
dmgav/lightsource2-recipes
|
2014b10a65e173b0b6fdd0707ef81709b0ce1b1f
|
[
"BSD-3-Clause"
] | 4
|
2016-07-17T23:55:23.000Z
|
2021-07-18T22:51:40.000Z
|
recipes-tag/pyzbar/run_test.py
|
dmgav/lightsource2-recipes
|
2014b10a65e173b0b6fdd0707ef81709b0ce1b1f
|
[
"BSD-3-Clause"
] | 477
|
2016-07-05T15:21:30.000Z
|
2020-03-23T20:02:52.000Z
|
recipes-tag/pyzbar/run_test.py
|
dmgav/lightsource2-recipes
|
2014b10a65e173b0b6fdd0707ef81709b0ce1b1f
|
[
"BSD-3-Clause"
] | 21
|
2016-07-25T16:18:52.000Z
|
2021-04-06T01:37:59.000Z
|
import pyzbar
import pyzbar.pyzbar
from pyzbar.pyzbar import decode
| 17
| 32
| 0.852941
| 10
| 68
| 5.8
| 0.4
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 3
| 33
| 22.666667
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
10d5d2b61dda1e3cc37e5494b00b5f383db14f81
| 173
|
py
|
Python
|
simpletransformers/language_generation/__init__.py
|
taranais/simpletransformers
|
36b2519cad5d8beed1f1726fa9b1163eb52286f0
|
[
"Apache-2.0"
] | 2
|
2020-09-14T07:40:14.000Z
|
2021-04-12T06:14:48.000Z
|
simpletransformers/language_generation/__init__.py
|
taranais/simpletransformers
|
36b2519cad5d8beed1f1726fa9b1163eb52286f0
|
[
"Apache-2.0"
] | 1
|
2020-05-31T22:54:58.000Z
|
2020-05-31T22:54:58.000Z
|
simpletransformers/language_generation/__init__.py
|
taranais/simpletransformers
|
36b2519cad5d8beed1f1726fa9b1163eb52286f0
|
[
"Apache-2.0"
] | null | null | null |
from simpletransformers.language_generation.language_generation_model import LanguageGenerationModel
from simpletransformers.config.model_args import LanguageGenerationArgs
| 57.666667
| 100
| 0.930636
| 16
| 173
| 9.8125
| 0.625
| 0.280255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046243
| 173
| 2
| 101
| 86.5
| 0.951515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8029383880bb17349fa790fa7e645976273db82e
| 245
|
py
|
Python
|
torchmeta/toy/__init__.py
|
yusufraji/siren
|
9416a751f64b9d1e9816f7a05e895531e9506d8a
|
[
"MIT"
] | 1,704
|
2019-09-16T15:08:18.000Z
|
2022-03-31T22:36:43.000Z
|
torchmeta/toy/__init__.py
|
yusufraji/siren
|
9416a751f64b9d1e9816f7a05e895531e9506d8a
|
[
"MIT"
] | 135
|
2019-09-20T15:34:03.000Z
|
2022-03-13T23:31:17.000Z
|
torchmeta/toy/__init__.py
|
yusufraji/siren
|
9416a751f64b9d1e9816f7a05e895531e9506d8a
|
[
"MIT"
] | 221
|
2019-09-17T09:01:21.000Z
|
2022-03-30T03:23:35.000Z
|
from torchmeta.toy.harmonic import Harmonic
from torchmeta.toy.sinusoid import Sinusoid
from torchmeta.toy.sinusoid_line import SinusoidAndLine
from torchmeta.toy import helpers
__all__ = ['Harmonic', 'Sinusoid', 'SinusoidAndLine', 'helpers']
| 30.625
| 64
| 0.816327
| 29
| 245
| 6.724138
| 0.344828
| 0.266667
| 0.328205
| 0.246154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097959
| 245
| 7
| 65
| 35
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0.155102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
337f404475a70dd054d2a1d4c710dc6756661761
| 2,065
|
py
|
Python
|
authors/apps/articles/tests/test_sharing.py
|
andela/ah-backend-zeus
|
44e2f554c4a7a10c06bd3c7be42fc91571c09f29
|
[
"BSD-3-Clause"
] | 1
|
2019-03-22T09:13:35.000Z
|
2019-03-22T09:13:35.000Z
|
authors/apps/articles/tests/test_sharing.py
|
andela/ah-backend-zeus
|
44e2f554c4a7a10c06bd3c7be42fc91571c09f29
|
[
"BSD-3-Clause"
] | 13
|
2018-11-27T16:48:25.000Z
|
2021-06-10T21:00:19.000Z
|
authors/apps/articles/tests/test_sharing.py
|
andela/ah-backend-zeus
|
44e2f554c4a7a10c06bd3c7be42fc91571c09f29
|
[
"BSD-3-Clause"
] | 9
|
2018-11-23T11:10:24.000Z
|
2019-04-04T11:04:33.000Z
|
from rest_framework.test import APIClient
from .base_test import BaseTest
from rest_framework import status
class TestSharing(BaseTest):
def test_api_can_share_an_article_on_facebook(self):
created_article = self.client.post(
'/api/articles/', data=self.new_article, format='json')
slug = self.get_slug(created_article)
response = self.client.post(
'/api/articles/{}/facebook/'.format(slug),
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_can_share_an_article_with_facebook_with_wrong_slug(self):
response = self.client.post(
'/api/articles/slug/facebook/',
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_api_can_share_an_article_on_twitter(self):
created_article = self.client.post(
'/api/articles/', data=self.new_article, format='json')
slug = self.get_slug(created_article)
response = self.client.post(
'/api/articles/{}/twitter/'.format(slug),
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_can_share_an_article_with_twitter_with_wrong_slug(self):
response = self.client.post(
'/api/articles/slug/twitter/',
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_api_can_share_an_article_with_email(self):
created_article = self.client.post(
'/api/articles/', data=self.new_article, format='json')
slug = self.get_slug(created_article)
response = self.client.post(
'/api/articles/{}/email/'.format(slug),
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_can_share_an_article_with_email_with_wrong_slug(self):
response = self.client.post(
'/api/articles/slug/email/'
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 34.416667
| 74
| 0.673608
| 259
| 2,065
| 5.034749
| 0.166023
| 0.069018
| 0.096626
| 0.117331
| 0.858129
| 0.858129
| 0.858129
| 0.858129
| 0.834356
| 0.82362
| 0
| 0.011152
| 0.218402
| 2,065
| 59
| 75
| 35
| 0.796778
| 0
| 0
| 0.55814
| 0
| 0
| 0.106693
| 0.074685
| 0
| 0
| 0
| 0
| 0.139535
| 1
| 0.139535
| false
| 0
| 0.069767
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
338c7fd82c12514e681e73d3d5425a07ff2ddf74
| 247
|
py
|
Python
|
zipkin/binding/requests/__init__.py
|
Themimitoof/python-zipkin
|
f91169d044a49f641930bdfc456f34e497690fe8
|
[
"Apache-2.0"
] | 4
|
2018-02-28T11:00:36.000Z
|
2020-01-22T10:52:18.000Z
|
zipkin/binding/requests/__init__.py
|
Themimitoof/python-zipkin
|
f91169d044a49f641930bdfc456f34e497690fe8
|
[
"Apache-2.0"
] | 4
|
2018-04-21T12:29:46.000Z
|
2021-06-22T06:48:45.000Z
|
zipkin/binding/requests/__init__.py
|
Themimitoof/python-zipkin
|
f91169d044a49f641930bdfc456f34e497690fe8
|
[
"Apache-2.0"
] | 4
|
2018-02-28T13:50:10.000Z
|
2021-07-01T09:47:01.000Z
|
try:
from .impl import bind, request_adapter
except ImportError as exc:
import logging
logging.getLogger(__name__).warn("requests not installed")
def bind():
pass
def request_adapter(adapter):
return adapter
| 19
| 62
| 0.680162
| 29
| 247
| 5.586207
| 0.724138
| 0.17284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246964
| 247
| 12
| 63
| 20.583333
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0.089069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.111111
| 0.333333
| 0.111111
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
|
0
| 6
|
33b228fde5a419ed1e631234a898b50145f7c788
| 2,976
|
py
|
Python
|
3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/TestMiFile.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | 3
|
2016-02-10T14:18:40.000Z
|
2018-02-05T03:15:56.000Z
|
3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/TestMiFile.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | 1
|
2016-02-10T15:40:03.000Z
|
2016-02-10T15:40:03.000Z
|
3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/TestMiFile.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | null | null | null |
"""
Test lldb-mi -file-xxx commands.
"""
import lldbmi_testcase
from lldbtest import *
import unittest2
class MiFileTestCase(lldbmi_testcase.MiTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_file_exec_and_symbols_file(self):
"""Test that 'lldb-mi --interpreter' works for -file-exec-and-symbols exe."""
self.spawnLldbMi(args = None)
# Test that -file-exec-and-symbols works for filename
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"exited-normally\"")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_file_exec_and_symbols_absolute_path(self):
"""Test that 'lldb-mi --interpreter' works for -file-exec-and-symbols fullpath/exe."""
self.spawnLldbMi(args = None)
# Test that -file-exec-and-symbols works for absolute path
import os
path = os.path.join(os.getcwd(), self.myexe)
self.runCmd("-file-exec-and-symbols \"%s\"" % path)
self.expect("\^done")
# Run
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"exited-normally\"")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_file_exec_and_symbols_relative_path(self):
"""Test that 'lldb-mi --interpreter' works for -file-exec-and-symbols relpath/exe."""
self.spawnLldbMi(args = None)
# Test that -file-exec-and-symbols works for relative path
path = "./%s" % self.myexe
self.runCmd("-file-exec-and-symbols %s" % path)
self.expect("\^done")
# Run
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"exited-normally\"")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_file_exec_and_symbols_unknown_path(self):
"""Test that 'lldb-mi --interpreter' works for -file-exec-and-symbols badpath/exe."""
self.spawnLldbMi(args = None)
# Test that -file-exec-and-symbols fails on unknown path
path = "unknown_dir/%s" % self.myexe
self.runCmd("-file-exec-and-symbols %s" % path)
self.expect("\^error")
if __name__ == '__main__':
unittest2.main()
| 36.292683
| 94
| 0.665323
| 373
| 2,976
| 5.182306
| 0.209115
| 0.066218
| 0.09105
| 0.148991
| 0.825142
| 0.825142
| 0.825142
| 0.81014
| 0.81014
| 0.81014
| 0
| 0.017744
| 0.204637
| 2,976
| 81
| 95
| 36.740741
| 0.798902
| 0.279234
| 0
| 0.625
| 0
| 0
| 0.228761
| 0.041766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
33b7968facc326b2e573917800145c84f816046a
| 109
|
py
|
Python
|
pysmap/twitterutil/__init__.py
|
SMAPPNYU/pysmapp
|
eb871992f40c53125129535e871525d5623c8c2d
|
[
"MIT"
] | 21
|
2016-05-22T22:09:54.000Z
|
2021-08-09T14:46:13.000Z
|
pysmap/twitterutil/__init__.py
|
SMAPPNYU/pysmapp
|
eb871992f40c53125129535e871525d5623c8c2d
|
[
"MIT"
] | 26
|
2016-05-06T16:34:09.000Z
|
2020-07-17T19:51:19.000Z
|
pysmap/twitterutil/__init__.py
|
SMAPPNYU/pysmapp
|
eb871992f40c53125129535e871525d5623c8c2d
|
[
"MIT"
] | 6
|
2016-08-16T10:35:02.000Z
|
2020-07-14T14:40:58.000Z
|
'''
module
'''
from . import smapp_collection, smapp_dataset
__all__ = ['smapp_collection', 'smapp_dataset']
| 18.166667
| 47
| 0.743119
| 12
| 109
| 6.083333
| 0.583333
| 0.410959
| 0.547945
| 0.739726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110092
| 109
| 6
| 47
| 18.166667
| 0.752577
| 0.055046
| 0
| 0
| 0
| 0
| 0.302083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1d0f97d1931d93977d049c7711217dd4ecab685f
| 43
|
py
|
Python
|
apps/Todo/serializers/__init__.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
apps/Todo/serializers/__init__.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
apps/Todo/serializers/__init__.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
from .TodoSerializer import TodoSerializer
| 21.5
| 42
| 0.883721
| 4
| 43
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1d83f20852b9dffc42a935bdafe17867ce4c3936
| 17,434
|
py
|
Python
|
model/Seq2SeqEncoders.py
|
bigaidream-projects/citylearn-2020-pikapika
|
8c9389eb4b4e979faf269b8c0ce87b499af97ac1
|
[
"Apache-2.0"
] | 3
|
2021-12-20T03:40:55.000Z
|
2022-02-02T04:26:33.000Z
|
model/Seq2SeqEncoders.py
|
bigaidream-projects/citylearn-2020-pikapika
|
8c9389eb4b4e979faf269b8c0ce87b499af97ac1
|
[
"Apache-2.0"
] | null | null | null |
model/Seq2SeqEncoders.py
|
bigaidream-projects/citylearn-2020-pikapika
|
8c9389eb4b4e979faf269b8c0ce87b499af97ac1
|
[
"Apache-2.0"
] | 4
|
2022-02-11T20:30:51.000Z
|
2022-02-27T01:17:34.000Z
|
import torch
from torch import nn
from model.BaseModules import TransformerDecoderLayer
from model.Encoder import BaseEncoder, AutoEncoder, TemporalConvNet
class Seq2SeqLSTM(BaseEncoder):
def __init__(self, source_size, target_size, hidden_size, **kwargs):
super(Seq2SeqLSTM, self).__init__()
self.encoder = nn.LSTM(source_size, hidden_size, **kwargs)
self.decoder = nn.LSTM(target_size, hidden_size, **kwargs)
def forward(self, src, tgt, **kwargs):
enc, enc_hx = self.encoder(src)
dec, _ = self.decoder(tgt, hx=enc_hx)
return enc, dec
class Seq2SeqLSTM_new(BaseEncoder):
def __init__(self, source_size, target_size, pred_len, hidden_size, **kwargs):
super(Seq2SeqLSTM_new, self).__init__()
self.pred_len = pred_len
self.encoder = nn.LSTM(source_size, hidden_size, **kwargs)
self.decoder = nn.LSTM(target_size, hidden_size, **kwargs)
def forward(self, src, **kwargs):
"""
:param src: (Time, batch*building, State)
:param kwargs:
:return:
"""
enc, enc_hx = self.encoder(src)
h_x, _ = enc_hx
dims = h_x.ndim - 1
dec_in = h_x.repeat(self.pred_len, *([1] * dims))
dec, _ = self.decoder(dec_in, hx=enc_hx)
return enc, dec
class Seq2SeqAttnEncoder(BaseEncoder):
def __init__(self, source_size, target_size, hidden_size, target_fn,
auto_encoder_kwargs, attn_kwargs, lstm_kwargs=None, **kwargs):
super(Seq2SeqAttnEncoder, self).__init__()
if lstm_kwargs is None:
lstm_kwargs = {}
self.target_fn = target_fn
self.auto_encoder = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs)
self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs)
self.HistoryTemporalModule = TransformerDecoderLayer(hidden_size, **attn_kwargs)
self.ForecastTemporalModule = TransformerDecoderLayer(hidden_size, **attn_kwargs)
def forward(self, x):
"""
:param x: the state sequence
:return: hidden state
Shape:
- x: :math:`(Batch, Building, Time, State)`.
- return: :math:`(Batch, Building, Hidden_State*2)`.
"""
def to_seq_first(tensor):
tensor = tensor.unsqueeze(0).transpose(0, -2)
return tensor.reshape(tensor.size(0), -1, tensor.size(-1))
def undo_seq_first(tensor, lead_dims):
tensor.transpose_(0, -2)
return tensor.reshape(*lead_dims, *tensor.shape[-2:])
src, tgt = self.target_fn(x)
assert src.shape[:-2] == tgt.shape[:-2]
h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
_, h_t = self.seq2seq(src=h_s, tgt=tgt)
h_cur = h_s[[-1]]
h_t = self.ForecastTemporalModule(tgt=h_cur, memory=h_t)
h_s = self.HistoryTemporalModule(tgt=h_cur, memory=h_s)
out = undo_seq_first(torch.cat((h_s, h_t), dim=-1), src.shape[:-2]).squeeze(-2)
# out.transpose_(0, 1) # -> (Building, Batch, State)
# out = self.BuildingAttnModule(out)
# out.transpose_(0, 1) # -> (Batch, Building, State)
return out
class Seq2SeqTCNEncoder(BaseEncoder):
    """
    Encoder that auto-encodes the history states, forecasts hidden states
    with a seq2seq LSTM, and summarises the history and forecast sequences
    with two separate TCNs whose outputs are concatenated per building.
    """

    def __init__(self, source_size, target_size, hidden_size, target_fn,
                 auto_encoder_kwargs, unique_kwargs_history, unique_kwargs_forecast,
                 lstm_kwargs=None, num_buildings=9, **kwargs):
        """
        :param source_size: dimensionality of a single input state vector.
        :param target_size: dimensionality of the seq2seq target vectors.
        :param hidden_size: hidden feature size shared by all sub-modules.
        :param target_fn: callable splitting x into (src, tgt) sequences.
        :param num_buildings: buildings per sample, used to un-flatten the
            (batch*building) axis before the TCNs. Defaults to 9, the value
            previously hard-coded inside forward(), so existing callers are
            unaffected.
        """
        super(Seq2SeqTCNEncoder, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        self.target_fn = target_fn
        self.num_buildings = num_buildings
        self.auto_encoder = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs)
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs)
        self.HistoryTemporalModule = TemporalConvNet(hidden_size, hidden_size, **unique_kwargs_history)
        self.ForecastTemporalModule = TemporalConvNet(hidden_size, hidden_size, **unique_kwargs_forecast)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state
        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            # (..., Time, D) -> (Time, prod(leading dims), D)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def to_TCN_input(tensor):
            # (seq, batch*building, s_dim) -> (batch, building, seq, s_dim);
            # the building axis size comes from self.num_buildings instead of
            # the previous hard-coded 9.
            tensor = tensor.transpose(1, 0)
            old_shape = tensor.shape
            return tensor.reshape((-1, self.num_buildings, *old_shape[-2:]))

        def reverse_t_dim(tensor):
            # Flip the time axis (dim 2) via index_select.
            inv_idx = torch.arange(tensor.size(2) - 1, -1, -1).long().to(tensor.device)
            return tensor.index_select(2, inv_idx)

        src, tgt = self.target_fn(x)
        assert src.shape[:-2] == tgt.shape[:-2]
        # Per-timestep auto-encoding of the history states.
        h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        # Forecast hidden states from the seq2seq model.
        _, h_t = self.seq2seq(src=h_s, tgt=tgt)
        # TCN input layout: (batch, building, seq, s_dim). The forecast
        # sequence is reversed on the time dimension before its TCN.
        h_t = self.ForecastTemporalModule(reverse_t_dim(to_TCN_input(h_t)))
        h_s = self.HistoryTemporalModule(to_TCN_input(h_s))
        out = torch.cat((h_s, h_t), dim=-1)  # concat history/forecast features
        # out.transpose_(0, 1) # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1) # -> (Batch, Building, State)
        return out
class Seq2SeqSymTCNEncoder_old(BaseEncoder):
    """
    Symmetric TCN encoder (older variant): concatenates history and
    forecast hidden states along the time axis and runs a single TCN
    over the joint sequence.
    """

    def __init__(self, source_size, target_size, hidden_size, target_fn, pred_len,
                 auto_encoder_kwargs, tcn_kwargs, lstm_kwargs=None, **kwargs):
        super(Seq2SeqSymTCNEncoder_old, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        # Number of forecast steps; used for the output index in forward().
        self.pred_len = pred_len
        self.target_fn = target_fn
        self.auto_encoder = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs)
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs)
        self.TemporalModule = TemporalConvNet(hidden_size, hidden_size, **tcn_kwargs)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state
        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            # (..., Time, D) -> (Time, prod(leading dims), D)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))
        def undo_seq_first(tensor, lead_dims):
            # Inverse of to_seq_first. NOTE: defined but unused here.
            tensor.transpose_(0, -2)
            return tensor.reshape(*lead_dims, *tensor.shape[-2:])
        def to_TCN_input(tensor):
            # tensor: (seq, batch*building, s_dim)
            tensor = tensor.transpose(1, 0) # (batch*building, seq, s_dim)
            old_shape = tensor.shape
            # NOTE(review): the building count is hard-coded to 9 -- confirm
            # against the dataset configuration.
            return tensor.reshape((-1, 9, *old_shape[-2:]))
        src, tgt = self.target_fn(x)
        assert src.shape[:-2] == tgt.shape[:-2]
        # Per-timestep auto-encoding of the history states.
        h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        _, h_t = self.seq2seq(src=h_s, tgt=tgt)
        # h_cur = h_s[[-1]]
        # Join history and forecast hidden states along the time axis.
        h = torch.cat((h_s, h_t), dim=0)
        # (seq, batch*building, s_dim)
        # TCN input: (batch, building, seq, s_dim)
        out = self.TemporalModule((to_TCN_input(h))).unbind(-2)[-1] # (batch, building, seq, 128)
        # NOTE(review): unbind(-2)[-1] appears to drop the seq axis already,
        # in which case the -(pred_len+1) index below selects along the
        # feature axis. Seq2SeqSymTCNEncoder_new applies the same index but
        # WITHOUT the unbind. Whether this is correct depends on
        # TemporalConvNet's output rank -- verify.
        # out.transpose_(0, 1) # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1) # -> (Batch, Building, State)
        return out[:, :, -(self.pred_len + 1)]
class Seq2SeqMixedTCNEncoder(BaseEncoder):
    """
    Mixed encoder: a (pretrained, eval-mode) auto-encoder + seq2seq LSTM
    produce forecast hidden states that a TCN summarises, while a second
    TCN consumes the raw full history directly; both per-building
    summaries are concatenated.
    """

    def __init__(self, source_size, target_size, hidden_size, target_fn,
                 auto_encoder_kwargs, unique_kwargs_history, unique_kwargs_forecast,
                 lstm_kwargs=None, ae_source_size=21, num_buildings=9, **kwargs):
        """
        :param source_size: feature size of the raw full-history input fed
            to the history TCN.
        :param ae_source_size: input feature size of the auto-encoder.
            Defaults to 21, the value previously hard-coded here, so
            existing callers are unaffected.
        :param num_buildings: buildings per sample, used to un-flatten the
            (batch*building) axis. Defaults to 9, the previously hard-coded
            value.
        """
        super(Seq2SeqMixedTCNEncoder, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        self.target_fn = target_fn
        self.num_buildings = num_buildings
        # NOTE: .eval() only switches dropout/batch-norm to inference mode;
        # it does not stop gradients. If these modules are meant to be
        # frozen, also call requires_grad_(False) -- TODO confirm intent.
        self.auto_encoder = AutoEncoder(ae_source_size, hidden_size, **auto_encoder_kwargs).eval()
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs).eval()
        self.HistoryTemporalModule = TemporalConvNet(source_size, hidden_size, **unique_kwargs_history)
        self.ForecastTemporalModule = TemporalConvNet(hidden_size, hidden_size, **unique_kwargs_forecast)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state
        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            # (..., Time, D) -> (Time, prod(leading dims), D)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def to_TCN_input(tensor):
            # (seq, batch*building, s_dim) -> (batch, building, seq, s_dim);
            # building axis size from self.num_buildings (was hard-coded 9).
            tensor = tensor.transpose(1, 0)
            old_shape = tensor.shape
            return tensor.reshape((-1, self.num_buildings, *old_shape[-2:]))

        def reverse_t_dim(tensor):
            # Flip the time axis (dim 2) via index_select.
            inv_idx = torch.arange(tensor.size(2) - 1, -1, -1).long().to(tensor.device)
            return tensor.index_select(2, inv_idx)

        # target_fn yields the raw full history plus the (src, tgt) split.
        src_full, src, tgt = self.target_fn(x)
        assert src_full.shape[:-2] == src.shape[:-2] == tgt.shape[:-2]
        h_s = self.auto_encoder(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        _, h_t = self.seq2seq(src=h_s, tgt=tgt)
        # TCN input layout: (batch, building, seq, s_dim). The forecast
        # hidden sequence is reversed on the time axis before its TCN;
        # unbind(-2)[-1] keeps only the last step along dim -2.
        h_t = self.ForecastTemporalModule(reverse_t_dim(to_TCN_input(h_t))).unbind(-2)[-1]
        # The history TCN consumes the raw full history directly.
        h_s = self.HistoryTemporalModule(src_full).unbind(-2)[-1]  # (batch, building, hidden)
        out = torch.cat((h_s, h_t), dim=-1)  # (batch, building, 2*hidden)
        # out.transpose_(0, 1) # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1) # -> (Batch, Building, State)
        return out
class Seq2SeqSymTCNEncoder(BaseEncoder):
    """
    Symmetric TCN encoder: one auto-encoder embeds the full history
    (including the SOC features) while a second embeds the SOC-stripped
    input for the seq2seq forecaster; a single TCN runs over the
    concatenated history + forecast hidden sequence.
    """

    def __init__(self, source_size, target_size, hidden_size, target_fn, pred_len,
                 auto_encoder_kwargs, tcn_kwargs, lstm_kwargs=None, **kwargs):
        super(Seq2SeqSymTCNEncoder, self).__init__()
        if lstm_kwargs is None:
            lstm_kwargs = {}
        # Number of forecast steps; used for the output index in forward().
        self.pred_len = pred_len
        self.target_fn = target_fn
        # History AE sees the full state vector. src_size = 21
        self.history_AE = AutoEncoder(source_size, hidden_size, **auto_encoder_kwargs)
        # Forecast-path AE sees the state minus its last 2 (SOC) features. src_size = 19
        self.auto_encoder = AutoEncoder(source_size - 2, hidden_size, **auto_encoder_kwargs)
        self.seq2seq = Seq2SeqLSTM(hidden_size, target_size, hidden_size, **lstm_kwargs)
        self.TemporalModule = TemporalConvNet(hidden_size, hidden_size, **tcn_kwargs)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state
        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State*2)`.
        """
        def to_seq_first(tensor):
            # (..., Time, D) -> (Time, prod(leading dims), D)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))
        def undo_seq_first(tensor, lead_dims):
            # Inverse of to_seq_first. NOTE: defined but unused here.
            tensor.transpose_(0, -2)
            return tensor.reshape(*lead_dims, *tensor.shape[-2:])
        def to_TCN_input(tensor):
            # tensor: (seq, batch*building, s_dim)
            tensor = tensor.transpose(1, 0) # (batch*building, seq, s_dim)
            old_shape = tensor.shape
            # NOTE(review): the building count is hard-coded to 9 -- confirm.
            return tensor.reshape((-1, 9, *old_shape[-2:]))
        src, tgt = self.target_fn(x)
        src_noSOC = src[:, :, :, :-2] # discard soc states (last 2 features)
        assert src.shape[:-2] == tgt.shape[:-2]
        # generate hidden states of history seq (full features, incl. SOC)
        h_s_out = self.history_AE(src.reshape(-1, src.size(-1))).reshape(*src.shape[:-1], -1)
        h_s_out = to_seq_first(h_s_out)
        # generate hidden states of forecast seq (SOC-stripped features)
        h_s = self.auto_encoder(src_noSOC.reshape(-1, src_noSOC.size(-1))).reshape(*src_noSOC.shape[:-1], -1)
        h_s, tgt = to_seq_first(h_s), to_seq_first(tgt)
        _, h_t_out = self.seq2seq(src=h_s, tgt=tgt)
        # Join history and forecast hidden states along the time axis.
        h = torch.cat((h_s_out, h_t_out), dim=0)
        # (seq, batch*building, s_dim)
        # TCN input: (batch, building, seq, s_dim)
        out = self.TemporalModule((to_TCN_input(h))).unbind(-2)[-1] # (batch, building, seq, 128)
        # NOTE(review): unbind(-2)[-1] appears to drop the seq axis already,
        # in which case the -(pred_len+1) index below selects along the
        # feature axis; Seq2SeqSymTCNEncoder_new applies the same index but
        # WITHOUT the unbind. Depends on TemporalConvNet's output rank --
        # verify.
        # out.transpose_(0, 1) # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1) # -> (Batch, Building, State)
        return out[:, :, -(self.pred_len + 1)]
class Seq2SeqSymTCNEncoder_new(BaseEncoder):
    """
    Symmetric TCN encoder (new variant): embeds a 21-feature history view
    and a 31-feature encoder view of the state, forecasts hidden states
    with Seq2SeqLSTM_new, concatenates the two hidden sequences along
    time, runs one TCN over the result, and returns the features at the
    last history time step.
    """

    def __init__(self, enc_src_size, history_src_size, hidden_size, pred_len,
                 auto_encoder_kwargs, tcn_kwargs, lstm_kwargs=None, **kwargs):
        """
        :param enc_src_size: feature size fed to the forecast-path auto-encoder.
        :param history_src_size: feature size fed to the history auto-encoder.
        :param hidden_size: hidden feature size shared by all sub-modules.
        :param pred_len: number of forecast steps.
        """
        super(Seq2SeqSymTCNEncoder_new, self).__init__()
        lstm_kwargs = lstm_kwargs or {}
        self.pred_len = pred_len
        self.history_AE = AutoEncoder(history_src_size, hidden_size, **auto_encoder_kwargs)  # src_size = 21
        self.auto_encoder = AutoEncoder(enc_src_size, hidden_size, **auto_encoder_kwargs)  # src_size = 31
        self.seq2seq = Seq2SeqLSTM_new(source_size=hidden_size, target_size=hidden_size,
                                       pred_len=pred_len, hidden_size=hidden_size, **lstm_kwargs)
        self.TemporalModule = TemporalConvNet(hidden_size, hidden_size, **tcn_kwargs)
        # self.BuildingAttnModule = TransformerEncoderLayer(hidden_size * 2, **attn_kwargs)

    def forward(self, x):
        """
        :param x: the state sequence
        :return: hidden state
        Shape:
            - x: :math:`(Batch, Building, Time, State)`.
            - return: :math:`(Batch, Building, Hidden_State)`.
        """
        def extract_history(states):
            # Select the history-relevant features (dim 33 -> 21):
            # columns 0-9, 10, 14, 18, 22 and 26-32.
            spans = ((0, 10), (10, 11), (14, 15), (18, 19), (22, 23), (26, 33))
            pieces = [states[:, :, :, lo:hi] for lo, hi in spans]
            return torch.cat(pieces, -1)

        def seq_first(tensor):
            # (..., Time, D) -> (Time, prod(leading dims), D)
            tensor = tensor.unsqueeze(0).transpose(0, -2)
            return tensor.reshape(tensor.size(0), -1, tensor.size(-1))

        def tcn_layout(tensor):
            # (seq, batch*building, s_dim) -> (batch, 9, seq, s_dim)
            swapped = tensor.transpose(1, 0)
            return swapped.reshape((-1, 9, *swapped.shape[-2:]))

        # x dim = 33
        history = extract_history(x)   # history view, dim = 21
        enc_in = x[:, :, :, :-2]       # encoder input: dim = 31
        # Hidden states of the history sequence.
        hist_flat = self.history_AE(history.reshape(-1, history.size(-1)))
        h_hist = seq_first(hist_flat.reshape(*history.shape[:-1], -1))
        # Hidden states of the forecast sequence.
        enc_flat = self.auto_encoder(enc_in.reshape(-1, enc_in.size(-1)))
        h_enc = seq_first(enc_flat.reshape(*enc_in.shape[:-1], -1))
        _, h_fore = self.seq2seq(src=h_enc)
        # Join history and forecast hidden states along the time axis and
        # run the TCN over the joint sequence.
        joint = torch.cat((h_hist, h_fore), dim=0)
        out = self.TemporalModule(tcn_layout(joint))  # (batch, building, seq, 128)
        # out.transpose_(0, 1) # -> (Building, Batch, State)
        # out = self.BuildingAttnModule(out)
        # out.transpose_(0, 1) # -> (Batch, Building, State)
        # Pick the time step just before the forecast horizon.
        return out[:, :, -(self.pred_len + 1)]
| 41.908654
| 113
| 0.607835
| 2,235
| 17,434
| 4.481432
| 0.05906
| 0.053914
| 0.044728
| 0.027955
| 0.889677
| 0.863219
| 0.849641
| 0.829673
| 0.814796
| 0.810403
| 0
| 0.023155
| 0.256854
| 17,434
| 416
| 114
| 41.908654
| 0.749923
| 0.217908
| 0
| 0.683983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021645
| 1
| 0.155844
| false
| 0
| 0.017316
| 0
| 0.329004
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1d99a7312ae1e219f6c94e7d842340e0e502b902
| 30
|
py
|
Python
|
test/login.py
|
schwert0501/manager
|
1e2f5d1e73cd34c28bb70366ee15e254ccf2d2a7
|
[
"MIT"
] | null | null | null |
test/login.py
|
schwert0501/manager
|
1e2f5d1e73cd34c28bb70366ee15e254ccf2d2a7
|
[
"MIT"
] | null | null | null |
test/login.py
|
schwert0501/manager
|
1e2f5d1e73cd34c28bb70366ee15e254ccf2d2a7
|
[
"MIT"
] | null | null | null |
# Module-level sample constants (test fixture values).
a = 10
b = 20
c = 300000
| 3.333333
| 10
| 0.433333
| 6
| 30
| 2.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.625
| 0.466667
| 30
| 8
| 11
| 3.75
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d52aace3638073485f08f9cdb66b6bf2bd3f884f
| 178
|
py
|
Python
|
students/K33401/Nikitin_michael/lab2/lab2/tours/admin.py
|
mexannik1998/ITMO_ICT_WebDevelopment_2021-2022
|
0894edd7d49a73abba31f72266fdeb35fc3f6367
|
[
"MIT"
] | null | null | null |
students/K33401/Nikitin_michael/lab2/lab2/tours/admin.py
|
mexannik1998/ITMO_ICT_WebDevelopment_2021-2022
|
0894edd7d49a73abba31f72266fdeb35fc3f6367
|
[
"MIT"
] | null | null | null |
students/K33401/Nikitin_michael/lab2/lab2/tours/admin.py
|
mexannik1998/ITMO_ICT_WebDevelopment_2021-2022
|
0894edd7d49a73abba31f72266fdeb35fc3f6367
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Register the app's models so they are editable in the Django admin site.
admin.site.register(User)
admin.site.register(Tour)
admin.site.register(UsersComments)
admin.site.register(Booked)
| 22.25
| 35
| 0.780899
| 24
| 178
| 5.791667
| 0.5
| 0.258993
| 0.489209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 178
| 8
| 36
| 22.25
| 0.879747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d54b99a826de04b44bc3ff168d6a7f749cd546cb
| 29
|
py
|
Python
|
simulation/common/flex_lab/__init__.py
|
LBNL-ETA/LPDM
|
3384a784b97e49cd7a801b758717a7107a51119f
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2019-01-05T02:33:38.000Z
|
2020-04-22T16:57:50.000Z
|
simulation/common/flex_lab/__init__.py
|
LBNL-ETA/LPDM
|
3384a784b97e49cd7a801b758717a7107a51119f
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-04-17T18:13:08.000Z
|
2021-04-23T22:40:23.000Z
|
simulation/common/flex_lab/__init__.py
|
LBNL-ETA/LPDM
|
3384a784b97e49cd7a801b758717a7107a51119f
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-01-31T08:37:44.000Z
|
2019-01-31T08:37:44.000Z
|
from flex_lab import FlexLab
| 14.5
| 28
| 0.862069
| 5
| 29
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d567a995fca8ac4cfdeec9be20c4e4200c181574
| 35
|
py
|
Python
|
ads/exercises/string_manipulation/__init__.py
|
Aminul-Momin/Algorithms_and_Data_Structures
|
cba73b36b73ad92fb34bc34a0e03503f7a137713
|
[
"MIT"
] | null | null | null |
ads/exercises/string_manipulation/__init__.py
|
Aminul-Momin/Algorithms_and_Data_Structures
|
cba73b36b73ad92fb34bc34a0e03503f7a137713
|
[
"MIT"
] | null | null | null |
ads/exercises/string_manipulation/__init__.py
|
Aminul-Momin/Algorithms_and_Data_Structures
|
cba73b36b73ad92fb34bc34a0e03503f7a137713
|
[
"MIT"
] | null | null | null |
from .interconvert_str_int import *
| 35
| 35
| 0.857143
| 5
| 35
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d581e8f8aab5e7037e51e82b9b46ec188c113c46
| 188
|
py
|
Python
|
sentence_transformers/datasets/__init__.py
|
azdaly/sentence-transformers
|
d365d14e6eb3a79b7589c6404020833d5bda7322
|
[
"Apache-2.0"
] | 16
|
2020-12-22T07:35:20.000Z
|
2022-02-09T19:49:02.000Z
|
sentence_transformers/datasets/__init__.py
|
azdaly/sentence-transformers
|
d365d14e6eb3a79b7589c6404020833d5bda7322
|
[
"Apache-2.0"
] | 1
|
2021-12-21T14:33:15.000Z
|
2021-12-27T20:40:39.000Z
|
sentence_transformers/datasets/__init__.py
|
azdaly/sentence-transformers
|
d365d14e6eb3a79b7589c6404020833d5bda7322
|
[
"Apache-2.0"
] | 3
|
2020-09-28T09:25:04.000Z
|
2021-06-23T19:16:53.000Z
|
from .sampler import *
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
| 37.6
| 62
| 0.888298
| 15
| 188
| 11.133333
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 188
| 4
| 63
| 47
| 0.97093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d5afe3a3ac807cc4d2f0da0f3d889267787923ef
| 38,413
|
py
|
Python
|
qradar4py/endpoints/system.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 10
|
2019-11-19T21:13:32.000Z
|
2021-11-17T19:35:53.000Z
|
qradar4py/endpoints/system.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 2
|
2021-05-21T16:15:16.000Z
|
2021-07-20T12:34:49.000Z
|
qradar4py/endpoints/system.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 6
|
2020-09-14T13:44:55.000Z
|
2021-11-17T19:35:55.000Z
|
from urllib.parse import urljoin
from qradar4py.endpoints.api_endpoint import QRadarAPIEndpoint
from qradar4py.endpoints.api_endpoint import request_vars
from qradar4py.endpoints.api_endpoint import header_vars
class System(QRadarAPIEndpoint):
"""
The QRadar API endpoint group /system and its endpoints.
"""
__baseurl = 'system/'
def __init__(self, url, header, verify):
super().__init__(urljoin(url, self.__baseurl),
header,
verify)
@request_vars('fields')
def get_about(self, *, fields=None, **kwargs):
"""
GET /system/about
Retrieves the current system information
"""
function_endpoint = urljoin(self._baseurl, 'about')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_authorization_capabilities(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/authorization/capabilities
Retrieves a list of capabilities that are currently in the system.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'authorization/capabilities')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_authorization_password_policies(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/authorization/password_policies
Retrieves a list of Password Policies that exist on the system
"""
function_endpoint = urljoin(self._baseurl, 'authorization/password_policies')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_authorization_password_policies_by_id(self, id, *, policy, fields=None, **kwargs):
"""
POST /system/authorization/password_policies/{id}
See api_mapping.xml
"""
function_endpoint = urljoin(self._baseurl, 'authorization/password_policies/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=policy, **kwargs)
@request_vars('fields')
def get_authorization_password_policies_by_id(self, id, *, fields=None, **kwargs):
"""
GET /system/authorization/password_policies/{id}
Retrieves a single Password Policies that exist on the system
"""
function_endpoint = urljoin(self._baseurl, 'authorization/password_policies/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_authorization_password_validators(self, *, body, fields=None, **kwargs):
"""
POST /system/authorization/password_validators
Creates a new password validator for the provided password based
on the current Password Policy.
"""
function_endpoint = urljoin(self._baseurl, 'authorization/password_validators')
return self._call('POST', function_endpoint, json=body, **kwargs)
def post_email_servers(self, *, email_server_details, **kwargs):
"""
POST /system/email_servers
Creates a new email server.
"""
function_endpoint = urljoin(self._baseurl, 'email_servers')
return self._call('POST', function_endpoint, json=email_server_details, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_email_servers(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/email_servers
Retrieves a list of all email servers.
"""
function_endpoint = urljoin(self._baseurl, 'email_servers')
return self._call('GET', function_endpoint, **kwargs)
def post_email_servers_by_email_server_id(self, email_server_id, *, email_server_details, **kwargs):
"""
POST /system/email_servers/{email_server_id}
Updates an existing email server.
"""
function_endpoint = urljoin(self._baseurl,
'email_servers/{email_server_id}'.format(email_server_id=email_server_id))
return self._call('POST', function_endpoint, json=email_server_details, **kwargs)
@request_vars('fields')
def get_email_servers_by_email_server_id(self, email_server_id, *, fields=None, **kwargs):
"""
GET /system/email_servers/{email_server_id}
Retrieves an email server based on the supplied email server ID.
"""
function_endpoint = urljoin(self._baseurl,
'email_servers/{email_server_id}'.format(email_server_id=email_server_id))
return self._call('GET', function_endpoint, **kwargs)
def delete_email_servers_by_email_server_id(self, email_server_id, **kwargs):
"""
DELETE /system/email_servers/{email_server_id}
Deletes an email server.
"""
function_endpoint = urljoin(self._baseurl,
'email_servers/{email_server_id}'.format(email_server_id=email_server_id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
def get_eula_acceptances(self, **kwargs):
"""
GET /system/eula_acceptances
Retrieves the list of EULA acceptance statuses that the caller has permission to see.
"""
function_endpoint = urljoin(self._baseurl, 'eula_acceptances')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_eula_acceptances_by_id(self, id, *, data, fields=None, **kwargs):
"""
POST /system/eula_acceptances/{id}
Updates an individual EULA acceptance.
"""
function_endpoint = urljoin(self._baseurl, 'eula_acceptances/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=data, **kwargs)
def get_eula_acceptances_by_id(self, id, **kwargs):
"""
GET /system/eula_acceptances/{id}
Retrieves an individual EULA Acceptance by id.
"""
function_endpoint = urljoin(self._baseurl, 'eula_acceptances/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
def get_eulas(self, **kwargs):
"""
GET /system/eulas
Retrieves a list of EULAs.
"""
function_endpoint = urljoin(self._baseurl, 'eulas')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_information_encodings(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/information/encodings
Retrieves the list of encodings that are supported by the system for event data..
"""
function_endpoint = urljoin(self._baseurl, 'information/encodings')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('sample_type', 'filter', 'fields', 'sort')
def get_information_locales(self, *, sample_type=None, filter=None, fields=None, sort=None, Range=None, **kwargs):
"""
GET /system/information/locales
Retrieve Locales.
"""
function_endpoint = urljoin(self._baseurl, 'information/locales')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('since', 'limit', 'fields')
def get_notifications(self, *, since=None, limit=None, fields=None, **kwargs):
"""
GET /system/notifications
Retrieves notifications
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'notifications')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
def delete_notifications_by_qid(self, qid, **kwargs):
"""
DELETE /system/notifications/{qid}
dismisses a notification
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'notifications/{qid}'.format(qid=qid))
return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('fields')
def get_notifications_by_qid(self, qid, *, fields=None, **kwargs):
"""
GET /system/notifications/{qid}
Retrieves notification by QID
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'notifications/{qid}'.format(qid=qid))
return self._call('GET', function_endpoint, headers=headers, **kwargs)
def post_proxy_servers(self, *, proxy_server_details, **kwargs):
"""
POST /system/proxy_servers
Create a proxy server
"""
function_endpoint = urljoin(self._baseurl, 'proxy_servers')
return self._call('POST', function_endpoint, json=proxy_server_details, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_proxy_servers(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/proxy_servers
Read all proxy servers
"""
function_endpoint = urljoin(self._baseurl, 'proxy_servers')
return self._call('GET', function_endpoint, **kwargs)
def delete_proxy_servers_by_id(self, id, **kwargs):
"""
DELETE /system/proxy_servers/{id}
Delete a proxy server
"""
function_endpoint = urljoin(self._baseurl, 'proxy_servers/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
@header_vars('fields')
def post_proxy_servers_by_id(self, id, *, proxy_server_details, fields=None, **kwargs):
"""
POST /system/proxy_servers/{id}
Update a proxy server
"""
function_endpoint = urljoin(self._baseurl, 'proxy_servers/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=proxy_server_details, **kwargs)
@request_vars('fields')
def get_proxy_servers_by_id(self, id, *, fields=None, **kwargs):
"""
GET /system/proxy_servers/{id}
Read a proxy server
"""
function_endpoint = urljoin(self._baseurl, 'proxy_servers/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_server_connection_validator(self, *, request_details, fields=None, **kwargs):
"""
POST /system/server_connection_validator
Creates a server connection validator for the provided hostname and port, based
on the provided host ids.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'server_connection_validator')
return self._call('POST', function_endpoint, json=request_details, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_servers(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/servers
Retrieves a list of all server hosts in the deployment.
"""
function_endpoint = urljoin(self._baseurl, 'servers')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_servers_by_server_id(self, server_id, *, fields=None, **kwargs):
"""
GET /system/servers/{server_id}
Retrieves a server host based on the supplied server ID.
"""
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}'.format(server_id=server_id))
return self._call('GET', function_endpoint, **kwargs)
def post_servers_by_server_id(self, server_id, *, details, **kwargs):
"""
POST /system/servers/{server_id}
Updates an existing server.
"""
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}'.format(server_id=server_id))
return self._call('POST', function_endpoint, json=details, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_servers_firewall_rules_by_server_id(self, server_id, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /system/servers/{server_id}/firewall_rules
Retrieves a list of access control firewall rules based on the supplied server ID.
"""
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}/firewall_rules'.format(server_id=server_id))
return self._call('GET', function_endpoint, **kwargs)
def put_servers_firewall_rules_by_server_id(self, server_id, *, rules, **kwargs):
"""
PUT /system/servers/{server_id}/firewall_rules
Sets the access control firewall rules based on the supplied server ID.
"""
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}/firewall_rules'.format(server_id=server_id))
return self._call('PUT', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_servers_network_interfaces_bonded_by_server_id(self, server_id, *, Range=None, filter=None, fields=None,
**kwargs):
"""
GET /system/servers/{server_id}/network_interfaces/bonded
Retrieves a list of the bonded network interfaces based on the supplied server ID.
"""
function_endpoint = urljoin(self._baseurl,
'servers/{server_id}/network_interfaces/bonded'.format(server_id=server_id))
return self._call('GET', function_endpoint, **kwargs)
def post_servers_network_interfaces_bonded_by_server_id(self, server_id, *, details, **kwargs):
"""
POST /system/servers/{server_id}/network_interfaces/bonded
Creates a new bonded network interface.
"""
function_endpoint = urljoin(self._baseurl,
'servers/{server_id}/network_interfaces/bonded'.format(server_id=server_id))
return self._call('POST', function_endpoint, json=details, **kwargs)
def post_servers_server_id_network_interfaces_bonded_by_device_name(self, server_id, device_name, *, details,
**kwargs):
"""
POST /system/servers/{server_id}/network_interfaces/bonded/{device_name}
Updates an existing bonded network interface.
"""
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}/network_interfaces/bonded/{device_name}'.format(
server_id=server_id, device_name=device_name))
return self._call('POST', function_endpoint, json=details, **kwargs)
def delete_servers_server_id_network_interfaces_bonded_by_device_name(self, server_id, device_name, **kwargs):
"""
DELETE /system/servers/{server_id}/network_interfaces/bonded/{device_name}
Removes a bonded network interface.
"""
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}/network_interfaces/bonded/{device_name}'.format(
server_id=server_id, device_name=device_name))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_servers_network_interfaces_dag_by_server_id(self, server_id, *, Range=None, filter=None, fields=None,
**kwargs):
"""
GET /system/servers/{server_id}/network_interfaces/dag
Retrieves a list of DAG network interfaces based on the supplied server ID.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl,
'servers/{server_id}/network_interfaces/dag'.format(server_id=server_id))
return self._call('GET', function_endpoint, headers=headers, **kwargs)
def post_servers_server_id_network_interfaces_dag_by_device_name(self, server_id, device_name, *, details,
**kwargs):
"""
POST /system/servers/{server_id}/network_interfaces/dag/{device_name}
Updates an existing DAG network interface.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'servers/{server_id}/network_interfaces/dag/{device_name}'.format(
server_id=server_id, device_name=device_name))
return self._call('POST', function_endpoint, json=details, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_servers_network_interfaces_ethernet_by_server_id(self, server_id, *, Range=None, filter=None, fields=None,
**kwargs):
"""
GET /system/servers/{server_id}/network_interfaces/ethernet
Retrieves a list of the ethernet network interfaces based on the supplied server ID.
"""
function_endpoint = urljoin(self._baseurl,
'servers/{server_id}/network_interfaces/ethernet'.format(server_id=server_id))
return self._call('GET', function_endpoint, **kwargs)
def post_servers_server_id_network_interfaces_ethernet_by_device_name(self, server_id, device_name, *, details, **kwargs):
    """
    POST /system/servers/{server_id}/network_interfaces/ethernet/{device_name}

    Updates an ethernet network interface based on the supplied server_id and
    device_name.
    """
    path = 'servers/{server_id}/network_interfaces/ethernet/{device_name}'.format(
        server_id=server_id, device_name=device_name)
    return self._call('POST', urljoin(self._baseurl, path), json=details, **kwargs)
@request_vars('fields')
def get_servers_system_time_settings_by_server_id(self, server_id, *, fields=None, **kwargs):
    """
    GET /system/servers/{server_id}/system_time_settings

    Retrieves the system time and time zone settings of a server host based
    on the supplied server ID.
    """
    path = 'servers/{server_id}/system_time_settings'.format(server_id=server_id)
    return self._call('GET', urljoin(self._baseurl, path), **kwargs)
@header_vars('fields')
def post_servers_system_time_settings_by_server_id(self, server_id, *, settings, fields=None, **kwargs):
    """
    POST /system/servers/{server_id}/system_time_settings

    Sets the system time and time zone settings of a server host.  Services
    are restarted after the call, so service interruptions will occur.
    """
    path = 'servers/{server_id}/system_time_settings'.format(server_id=server_id)
    return self._call('POST', urljoin(self._baseurl, path), json=settings, **kwargs)
@request_vars('fields')
def get_servers_timezones_by_server_id(self, server_id, *, fields=None, **kwargs):
    """
    GET /system/servers/{server_id}/timezones

    Retrieves all the available time zones that can be set for a server.
    """
    path = 'servers/{server_id}/timezones'.format(server_id=server_id)
    return self._call('GET', urljoin(self._baseurl, path), **kwargs)
@request_vars('fields')
def get_summary(self, *, fields=None, **kwargs):
    """
    GET /system/summary
    Retrieves notifications summary
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'summary')
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('task_id', 'email_addresses')
def post_task_management_email_action(self, *, task_id, email_addresses, **kwargs):
    """
    POST /system/task_management/email_action
    Adds an email action to the TaskStatus. The email will be
    executed on Completion or Exception of the Task
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/email_action')
    return self._call('POST', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('host_id', 'app_id', 'status_uuid', 'task_class', 'task_type', 'children_ids', 'sub_task_ids',
              'task_state', 'task_name_local_info', 'message_local_info', 'progress', 'minimum', 'maximum',
              'created_by', 'cancelled_by', 'created_time', 'started_time', 'modified_time', 'completed_time',
              'retention', 'result_url', 'result_delete_task', 'is_cancel_requested', 'delete_task_id', 'fields')
def post_task_management_internal_tasks_by_id(self, id, *, host_id=None, app_id=None, status_uuid=None,
                                              task_class=None, task_type=None, children_ids=None, sub_task_ids=None,
                                              task_state=None, task_name_local_info=None, message_local_info=None,
                                              progress=None, minimum=None, maximum=None, created_by=None,
                                              cancelled_by=None, created_time=None, started_time=None,
                                              modified_time=None, completed_time=None, retention=None,
                                              result_url=None, result_delete_task=None, is_cancel_requested=None,
                                              delete_task_id=None, fields=None, **kwargs):
    """
    POST /system/task_management/internal_tasks/{id}
    Updates a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/internal_tasks/{id}'.format(id=id))
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_task_management_subtasks(self, *, filter=None, fields=None, Range=None, **kwargs):
    """
    GET /system/task_management/subtasks
    Gets all TaskSubStatuses
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/subtasks')
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('task_state', 'message_local_info', 'progress', 'minimum', 'maximum', 'created_time', 'started_time',
              'modified_time', 'completed_time', 'fields')
def post_task_management_subtasks(self, *, task_state, message_local_info, progress=None, minimum=None,
                                  maximum=None, created_time=None, started_time=None, modified_time=None,
                                  completed_time=None, fields=None, **kwargs):
    """
    POST /system/task_management/subtasks
    Create a TaskSubStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/subtasks')
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
@request_vars('task_state', 'message_local_info', 'progress', 'minimum', 'maximum', 'created_time', 'started_time',
              'modified_time', 'completed_time', 'fields')
def post_task_management_subtasks_by_id(self, id, *, task_state=None, message_local_info=None, progress=None,
                                        minimum=None, maximum=None, created_time=None, started_time=None,
                                        modified_time=None, completed_time=None, fields=None, **kwargs):
    """
    POST /system/task_management/subtasks/{id}
    Updates a TaskSubStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/subtasks/{id}'.format(id=id))
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
def delete_task_management_subtasks_by_id(self, id, **kwargs):
    """
    DELETE /system/task_management/subtasks/{id}
    Deletes a TaskSubStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/subtasks/{id}'.format(id=id))
    return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('fields')
def get_task_management_subtasks_by_id(self, id, *, fields=None, **kwargs):
    """
    GET /system/task_management/subtasks/{id}
    Gets a TaskSubStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/subtasks/{id}'.format(id=id))
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('host_id', 'app_id', 'status_uuid', 'children_ids', 'task_type', 'task_state', 'task_name_local_info',
              'message_local_info', 'progress', 'minimum', 'maximum', 'created_by', 'cancelled_by', 'created',
              'started', 'modified', 'completed', 'retention', 'result_url', 'result_delete_task', 'delete_task_id',
              'fields')
def post_task_management_task(self, *, app_id, task_type, task_state, task_name_local_info, message_local_info,
                              created_by, host_id=None, status_uuid=None, children_ids=None, progress=None,
                              minimum=None, maximum=None, cancelled_by=None, created=None, started=None,
                              modified=None, completed=None, retention=None, result_url=None,
                              result_delete_task=None, delete_task_id=None, fields=None, **kwargs):
    """
    POST /system/task_management/task
    Create a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task')
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_task_management_task(self, *, filter=None, fields=None, Range=None, **kwargs):
    """
    GET /system/task_management/task
    Gets all TaskStatuses
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task')
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('id_type', 'fields')
def get_task_management_task_by_id(self, id, *, id_type=None, fields=None, **kwargs):
    """
    GET /system/task_management/task/{id}
    Gets a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task/{id}'.format(id=id))
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('delete_result')
def delete_task_management_task_by_id(self, id, *, delete_result=None, **kwargs):
    """
    DELETE /system/task_management/task/{id}
    Deletes a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task/{id}'.format(id=id))
    return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('host_id', 'app_id', 'status_uuid', 'task_type', 'children_ids', 'task_state', 'task_name_local_info',
              'message_local_info', 'progress', 'minimum', 'maximum', 'created_by', 'cancelled_by', 'created',
              'started', 'modified', 'completed', 'retention', 'result_url', 'result_delete_task',
              'is_cancel_requested', 'delete_task_id', 'fields')
def post_task_management_task_by_id(self, id, *, host_id=None, app_id=None, status_uuid=None, task_type=None,
                                    children_ids=None, task_state=None, task_name_local_info=None,
                                    message_local_info=None, progress=None, minimum=None, maximum=None,
                                    created_by=None, cancelled_by=None, created=None, started=None, modified=None,
                                    completed=None, retention=None, result_url=None, result_delete_task=None,
                                    is_cancel_requested=None, delete_task_id=None, fields=None, **kwargs):
    """
    POST /system/task_management/task/{id}
    Updates a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task/{id}'.format(id=id))
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
def delete_task_management_task_result_by_id(self, id, **kwargs):
    """
    DELETE /system/task_management/task/{id}/result
    Deletes the result from the TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    # (Docstring also corrected: a DELETE removes the result, it does not get it.)
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task/{id}/result'.format(id=id))
    return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
def get_task_management_task_resume_data_by_id(self, id, **kwargs):
    """
    GET /system/task_management/task/{id}/resume_data
    Gets the resume data from the TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task/{id}/resume_data'.format(id=id))
    return self._call('GET', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('resume_data')
def post_task_management_task_resume_data_by_id(self, id, *, resume_data, **kwargs):
    """
    POST /system/task_management/task/{id}/resume_data
    Creates the resume data for the TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task/{id}/resume_data'.format(id=id))
    return self._call('POST', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('task_id')
def post_task_management_task_id_by_uuid(self, uuid, *, task_id, **kwargs):
    """
    POST /system/task_management/task_id/{uuid}
    No summary provided
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task_id/{uuid}'.format(uuid=uuid))
    return self._call('POST', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
def get_task_management_task_id_by_uuid(self, uuid, **kwargs):
    """
    GET /system/task_management/task_id/{uuid}
    No summary provided
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task_id/{uuid}'.format(uuid=uuid))
    return self._call('GET', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
def delete_task_management_task_id_by_uuid(self, uuid, **kwargs):
    """
    DELETE /system/task_management/task_id/{uuid}
    No summary provided
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/task_id/{uuid}'.format(uuid=uuid))
    return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
@request_vars('host_id', 'app_id', 'status_uuid', 'children_ids', 'sub_task_ids', 'task_class', 'task_type',
              'task_state', 'task_name_local_info', 'message_local_info', 'progress', 'minimum', 'maximum',
              'created_by', 'cancelled_by', 'created_time', 'started_time', 'modified_time', 'completed_time',
              'retention', 'result_url', 'result_delete_task', 'delete_task_id', 'fields')
def post_task_management_tasks(self, *, app_id, task_class, task_type, task_state, task_name_local_info,
                               message_local_info, created_by, host_id=None, status_uuid=None, children_ids=None,
                               sub_task_ids=None, progress=None, minimum=None, maximum=None, cancelled_by=None,
                               created_time=None, started_time=None, modified_time=None, completed_time=None,
                               retention=None, result_url=None, result_delete_task=None, delete_task_id=None,
                               fields=None, **kwargs):
    """
    POST /system/task_management/tasks
    Create a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks')
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_task_management_tasks(self, *, filter=None, fields=None, Range=None, **kwargs):
    """
    GET /system/task_management/tasks
    Gets all TaskStatuses
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks')
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('is_cancel_requested', 'fields')
def post_task_management_tasks_by_id(self, id, *, is_cancel_requested=None, fields=None, **kwargs):
    """
    POST /system/task_management/tasks/{id}
    Updates a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks/{id}'.format(id=id))
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
@request_vars('id_type', 'fields')
def get_task_management_tasks_by_id(self, id, *, id_type=None, fields=None, **kwargs):
    """
    GET /system/task_management/tasks/{id}
    Gets a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks/{id}'.format(id=id))
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('delete_result')
def delete_task_management_tasks_by_id(self, id, *, delete_result=None, **kwargs):
    """
    DELETE /system/task_management/tasks/{id}
    Deletes a TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks/{id}'.format(id=id))
    return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
def get_task_management_tasks_result_by_id(self, id, **kwargs):
    """
    GET /system/task_management/tasks/{id}/result
    Gets the result from the TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks/{id}/result'.format(id=id))
    return self._call('GET', function_endpoint, response_type='application/octet-stream', headers=headers, **kwargs)
def post_task_management_tasks_result_by_id(self, id, *, result, **kwargs):
    """
    POST /system/task_management/tasks/{id}/result
    Creates the result for the TaskStatus
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks/{id}/result'.format(id=id))
    return self._call('POST', function_endpoint, response_type='text/plain',
                      mime_type={'Content-Type': 'application/octet-stream'}, data=result, headers=headers,
                      **kwargs)
def delete_task_management_tasks_result_by_id(self, id, **kwargs):
    """
    DELETE /system/task_management/tasks/{id}/result
    Deletes a result
    UNDOCUMENTED
    """
    # dict.update() returns None — the original sent headers=None and dropped
    # Allow-Hidden; build the dict explicitly (pop avoids a duplicate kwarg).
    headers = dict(kwargs.pop('headers', None) or {})
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'task_management/tasks/{id}/result'.format(id=id))
    return self._call('DELETE', function_endpoint, response_type='text/plain', headers=headers, **kwargs)
| 50.477004
| 148
| 0.640486
| 4,311
| 38,413
| 5.430063
| 0.047553
| 0.094323
| 0.067794
| 0.079585
| 0.896322
| 0.870135
| 0.831304
| 0.790978
| 0.769405
| 0.737152
| 0
| 0.000102
| 0.234973
| 38,413
| 760
| 149
| 50.543421
| 0.796475
| 0.165178
| 0
| 0.592992
| 0
| 0
| 0.16109
| 0.056454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.188679
| false
| 0.021563
| 0.010782
| 0
| 0.390836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
639571f1b321312e941cb005a13270910e527b69
| 2,247
|
py
|
Python
|
frontend/extras/migration/versions/430a70c8aa21_version_1_2_1.py
|
krisshol/bach-kmno
|
f40d85b3397bb340e26a671c54d4a753dbbb0d43
|
[
"Apache-2.0"
] | 248
|
2015-01-08T09:36:44.000Z
|
2022-01-12T10:29:21.000Z
|
frontend/extras/migration/versions/430a70c8aa21_version_1_2_1.py
|
krisshol/bach-kmno
|
f40d85b3397bb340e26a671c54d4a753dbbb0d43
|
[
"Apache-2.0"
] | 50
|
2015-01-09T08:31:57.000Z
|
2022-03-30T10:41:13.000Z
|
frontend/extras/migration/versions/430a70c8aa21_version_1_2_1.py
|
krisshol/bach-kmno
|
f40d85b3397bb340e26a671c54d4a753dbbb0d43
|
[
"Apache-2.0"
] | 74
|
2015-01-05T09:11:21.000Z
|
2022-03-29T02:16:54.000Z
|
"""version 1.2.1
Revision ID: 430a70c8aa21
Revises: 2cc69d5c53eb
Create Date: 2015-07-06 16:34:44.422586
"""
# revision identifiers, used by Alembic.
revision = '430a70c8aa21'
down_revision = '2cc69d5c53eb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Convert the scan timestamp columns from Float(2) to Numeric."""
    # Every column gets the identical Float(precision=2) -> Numeric(asdecimal=False)
    # conversion, so drive the three alter_column calls from one table.
    for table_name, column_name in (
        ('irma_file', 'timestamp_first_scan'),
        ('irma_file', 'timestamp_last_scan'),
        ('irma_scanEvents', 'timestamp'),
    ):
        op.alter_column(table_name,
                        column_name,
                        nullable=False,
                        type_=sa.Numeric(asdecimal=False),
                        existing_type=sa.Float(precision=2),
                        existing_server_default=False,
                        existing_nullable=False)
def downgrade():
    """Revert the scan timestamp columns from Numeric back to Float(2)."""
    # Mirror of upgrade(): same columns, with type_/existing_type swapped.
    for table_name, column_name in (
        ('irma_file', 'timestamp_first_scan'),
        ('irma_file', 'timestamp_last_scan'),
        ('irma_scanEvents', 'timestamp'),
    ):
        op.alter_column(table_name,
                        column_name,
                        nullable=False,
                        type_=sa.Float(precision=2),
                        existing_type=sa.Numeric(asdecimal=False),
                        existing_server_default=False,
                        existing_nullable=False)
| 34.569231
| 62
| 0.543836
| 210
| 2,247
| 5.566667
| 0.261905
| 0.133447
| 0.066724
| 0.087254
| 0.788708
| 0.788708
| 0.788708
| 0.788708
| 0.788708
| 0.788708
| 0
| 0.040397
| 0.372052
| 2,247
| 64
| 63
| 35.109375
| 0.788094
| 0.063195
| 0
| 0.84
| 0
| 0
| 0.088698
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
63a0d67f1a458d889b69a9cef79f825fb47cc08e
| 44
|
py
|
Python
|
coffin/contrib/auth/middleware.py
|
spothero/coffin
|
9ea6a9173cbfed592c5b4776c489dba8d9280d52
|
[
"BSD-3-Clause"
] | 1
|
2016-11-19T06:32:20.000Z
|
2016-11-19T06:32:20.000Z
|
coffin/contrib/auth/middleware.py
|
spothero/coffin
|
9ea6a9173cbfed592c5b4776c489dba8d9280d52
|
[
"BSD-3-Clause"
] | null | null | null |
coffin/contrib/auth/middleware.py
|
spothero/coffin
|
9ea6a9173cbfed592c5b4776c489dba8d9280d52
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T23:12:00.000Z
|
2022-03-08T23:12:00.000Z
|
from django.contrib.auth.middleware import *
| 44
| 44
| 0.840909
| 6
| 44
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 1
| 44
| 44
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
894b7547cbf5ac37620d6e1b421a073c16c53c44
| 20
|
py
|
Python
|
letstest.py
|
AMARTYA2020/Verify-Service-provider-and-country-code-of-SIM-using-python
|
d434f7795fefa4509476d1afbce2c6e63ff0c513
|
[
"MIT"
] | null | null | null |
letstest.py
|
AMARTYA2020/Verify-Service-provider-and-country-code-of-SIM-using-python
|
d434f7795fefa4509476d1afbce2c6e63ff0c513
|
[
"MIT"
] | null | null | null |
letstest.py
|
AMARTYA2020/Verify-Service-provider-and-country-code-of-SIM-using-python
|
d434f7795fefa4509476d1afbce2c6e63ff0c513
|
[
"MIT"
] | null | null | null |
# MSISDN (international format) used as the lookup input for the SIM
# carrier/country check.  NOTE(review): this looks like a real phone number
# hard-coded into the repo — confirm it is a test number before publishing.
no = "+917481866756"
| 20
| 20
| 0.7
| 2
| 20
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0.1
| 20
| 1
| 20
| 20
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
89bdcbe5abf1ac15ca682d966cc2aa392c592396
| 93
|
py
|
Python
|
week3/merge_sort.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
week3/merge_sort.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
week3/merge_sort.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
def mergeSort(lst):
    """Return a new list with the items of *lst* in ascending order.

    Classic top-down merge sort: split the list in half, sort each half
    recursively, then merge the two sorted halves.  The input list is not
    modified.  (The original body was an unimplemented ``pass``, so the
    module's print call only ever printed ``None``.)
    """
    if len(lst) <= 1:
        return list(lst)
    mid = len(lst) // 2
    left = mergeSort(lst[:mid])
    right = mergeSort(lst[mid:])
    merged = []
    i = j = 0
    # Merge: repeatedly take the smaller head element; `<=` keeps the sort stable.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Smoke test: sort (an already-sorted) sample list and print the result.
print(mergeSort([2,3,4,5,15,19,26,27,36,38,44,46,47,48,50] ))
| 15.5
| 61
| 0.634409
| 21
| 93
| 2.809524
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.317073
| 0.11828
| 93
| 6
| 61
| 15.5
| 0.402439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
982388985bc501fa6da69aaca11839047a391bf1
| 65
|
py
|
Python
|
NTMY-code/panel/logic/__init__.py
|
AndreaCossio/PoliTo-Projects
|
f89c8ce1e04d54e38a1309a01c7e3a9aa67d5a81
|
[
"MIT"
] | null | null | null |
NTMY-code/panel/logic/__init__.py
|
AndreaCossio/PoliTo-Projects
|
f89c8ce1e04d54e38a1309a01c7e3a9aa67d5a81
|
[
"MIT"
] | null | null | null |
NTMY-code/panel/logic/__init__.py
|
AndreaCossio/PoliTo-Projects
|
f89c8ce1e04d54e38a1309a01c7e3a9aa67d5a81
|
[
"MIT"
] | 1
|
2022-02-19T11:26:30.000Z
|
2022-02-19T11:26:30.000Z
|
from . import exceptions
from . import graph
from . import route
| 16.25
| 24
| 0.769231
| 9
| 65
| 5.555556
| 0.555556
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 65
| 3
| 25
| 21.666667
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98326c45e7401affd51e2528c345c51f615b4e01
| 736
|
py
|
Python
|
tests/test_MCPlayerQ.py
|
maburto00/ndsgo
|
9cd27adcdf937cdf9863c158e039ad131d6b24eb
|
[
"MIT"
] | 1
|
2018-02-20T15:51:05.000Z
|
2018-02-20T15:51:05.000Z
|
tests/test_MCPlayerQ.py
|
maburto00/ndsgo
|
9cd27adcdf937cdf9863c158e039ad131d6b24eb
|
[
"MIT"
] | 2
|
2020-02-11T13:11:08.000Z
|
2020-02-12T16:59:11.000Z
|
tests/test_MCPlayerQ.py
|
maburto00/ndsgo
|
9cd27adcdf937cdf9863c158e039ad131d6b24eb
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
class TestMCPlayerQ(TestCase):
    """Test suite for MCPlayerQ.

    Only ``test_copy`` exists so far and it is an empty placeholder.  The
    previous version carried ~25 lines of commented-out failing test stubs
    (test_new_game, test_save_Q, test_load_Q, ...); dead, commented-out code
    belongs in version-control history, so it has been removed.
    """

    def test_copy(self):
        # TODO: exercise MCPlayerQ copy behaviour once it is specified.
        pass
| 21.647059
| 40
| 0.447011
| 74
| 736
| 4.189189
| 0.337838
| 0.225806
| 0.348387
| 0.387097
| 0.5
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0.436141
| 736
| 33
| 41
| 22.30303
| 0.746988
| 0.506793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
984f8ea6d5d4ca903e86d0ebd154005b9e5d0b28
| 114
|
py
|
Python
|
src/test/resources/cases/ModulesWithNoFuncs/dunder_and_testable.py
|
AlexTereshenkov/pybutler
|
3712e116d9b33fb8ad52b8f00fd41136f1266090
|
[
"MIT"
] | null | null | null |
src/test/resources/cases/ModulesWithNoFuncs/dunder_and_testable.py
|
AlexTereshenkov/pybutler
|
3712e116d9b33fb8ad52b8f00fd41136f1266090
|
[
"MIT"
] | null | null | null |
src/test/resources/cases/ModulesWithNoFuncs/dunder_and_testable.py
|
AlexTereshenkov/pybutler
|
3712e116d9b33fb8ad52b8f00fd41136f1266090
|
[
"MIT"
] | null | null | null |
def __somefunc__():
    # Fixture: a dunder-named function returning an arbitrary constant.
    # NOTE(review): per the file path this is presumably a case the tool
    # should *skip* when detecting testable functions — confirm in the
    # consuming test before altering names or bodies here.
    return 42
def not_dunder_func(arg1, arg2):
    """Docstring of function1"""
    # Fixture counterpart: a normally-named function with unused parameters.
    # NOTE(review): presumably the case the tool should report as testable
    # (inferred from the file path) — the exact name/docstring may be
    # asserted by the consuming test, so keep them as-is.
    return 42
| 19
| 32
| 0.675439
| 15
| 114
| 4.733333
| 0.8
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077778
| 0.210526
| 114
| 6
| 33
| 19
| 0.711111
| 0.192982
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
985d0fad013a881e0b4d202362ee110159b7a3e1
| 6,292
|
py
|
Python
|
test/test_authorization.py
|
periodo/periodo-server
|
7cd0250cbc6260cdc6f66aa8d95b316d9eaaf9ac
|
[
"CC0-1.0"
] | 9
|
2015-05-07T07:40:16.000Z
|
2020-01-13T15:53:01.000Z
|
test/test_authorization.py
|
periodo/periodo-server
|
7cd0250cbc6260cdc6f66aa8d95b316d9eaaf9ac
|
[
"CC0-1.0"
] | 118
|
2015-01-27T21:14:49.000Z
|
2022-03-18T07:06:14.000Z
|
test/test_authorization.py
|
periodo/periodo-server
|
7cd0250cbc6260cdc6f66aa8d95b316d9eaaf9ac
|
[
"CC0-1.0"
] | 1
|
2015-11-09T10:31:16.000Z
|
2015-11-09T10:31:16.000Z
|
import httpx
import json
import pytest
from urllib.parse import urlparse
from periodo import app, database
def test_unauthorized_user(unauthorized_user):
    """A user with no grants has an empty permissions list in the database."""
    with app.app_context():
        record = database.query_db_for_one(
            "SELECT permissions FROM user WHERE id = ?", (unauthorized_user.id,)
        )
        permissions = json.loads(record["permissions"])
    assert permissions == []
def test_admin_user(admin_user):
    """An admin user is granted the full set of patch/bag/graph actions."""
    expected = [
        ["action", "submit-patch"],
        ["action", "create-bag"],
        ["action", "accept-patch"],
        ["action", "create-graph"],
    ]
    with app.app_context():
        record = database.query_db_for_one(
            "SELECT permissions FROM user WHERE id = ?", (admin_user.id,)
        )
        permissions = json.loads(record["permissions"])
    assert permissions == expected
@pytest.mark.client_auth_token("this-token-has-no-permissions")
def test_unauthorized_user_submit_patch(unauthorized_user, client):
    """PATCH /d/ without submit-patch permission yields 403 + bearer challenge."""
    # Referencing the fixture ensures the unauthorized user row exists.
    unauthorized_user
    res = client.patch("/d/")
    assert res.status_code == httpx.codes.FORBIDDEN
    assert res.headers["WWW-Authenticate"] == (
        'Bearer realm="PeriodO", error="insufficient_scope", '
        + "error_description="
        + '"The access token does not provide sufficient privileges", '
        + 'error_uri="http://tools.ietf.org/html/rfc6750#section-6.2.3"'
    )
@pytest.mark.client_auth_token("this-token-has-normal-permissions")
def test_authorized_identity_submit_patch(active_user, client, load_json):
    """An authorized PATCH is accepted and recorded with the submitter's id."""
    res = client.patch("/d/", json=load_json("test-patch-replace-values-1.json"))
    assert res.status_code == httpx.codes.ACCEPTED
    # The Location header ends .../patches/{patch_id}/ — second-to-last segment.
    patch_id = int(res.headers["Location"].split("/")[-2])
    with app.app_context():
        creator = database.query_db_for_one(
            "SELECT created_by FROM patch_request WHERE id = ?", (patch_id,)
        )["created_by"]
    assert creator == active_user.id
@pytest.mark.client_auth_token("this-token-has-normal-permissions")
def test_nonadmin_user_merge_patch(active_user, client, load_json):
    """A non-admin sees no merge link and cannot POST .../merge (403)."""
    active_user
    # submit the patch
    res = client.patch("/d/", json=load_json("test-patch-replace-values-1.json"))
    # There should be NO link header
    patch_url = urlparse(res.headers["Location"]).path
    res = client.get(patch_url)
    assert "Link" not in res.headers
    # now try to merge the patch
    res = client.post(patch_url + "merge")
    assert res.status_code == httpx.codes.FORBIDDEN
    assert res.headers["WWW-Authenticate"] == (
        'Bearer realm="PeriodO", error="insufficient_scope", '
        + "error_description="
        + '"The access token does not provide sufficient privileges", '
        + 'error_uri="http://tools.ietf.org/html/rfc6750#section-6.2.3"'
    )
@pytest.mark.client_auth_token("this-token-has-admin-permissions")
def test_admin_user_merge_patch(
    admin_user,
    active_user,
    client,
    load_json,
    bearer_auth,
):
    """An admin sees the merge link, can merge, and is recorded as merger."""
    active_user
    # submit the patch as normal user
    res = client.patch(
        "/d/",
        auth=bearer_auth("this-token-has-normal-permissions"),
        json=load_json("test-patch-replace-values-1.json"),
    )
    patch_id = int(res.headers["Location"].split("/")[-2])
    # Admin should see a link header
    patch_url = urlparse(res.headers["Location"]).path
    res = client.get(patch_url)
    assert res.headers.get("Link") == f'<{patch_url + "merge"}>;rel="merge"'
    # now merge the patch
    res = client.post(patch_url + "merge")
    # BUG FIX: the original read `assert res.status_code, httpx.codes.NO_CONTENT`
    # — a comma makes the second value an assert *message*, so the check always
    # passed for any truthy status code.  Compare with == as intended.
    assert res.status_code == httpx.codes.NO_CONTENT
    with app.app_context():
        merger = database.query_db_for_one(
            "SELECT merged_by FROM patch_request WHERE id = ?", (patch_id,)
        )["merged_by"]
    assert merger == admin_user.id
@pytest.mark.client_auth_token("this-token-has-admin-permissions")
def test_noncreator_identity_update_patch(
    admin_user,
    active_user,
    client,
    load_json,
    bearer_auth,
):
    """Only the creator may update a pending patch; another identity gets 403."""
    admin_user, active_user
    # submit the patch as normal user
    res = client.patch(
        "/d/",
        auth=bearer_auth("this-token-has-normal-permissions"),
        json=load_json("test-patch-replace-values-1.json"),
    )
    # now try to update the patch as a different user (admin)
    patch_url = urlparse(res.headers["Location"]).path
    res = client.put(
        patch_url + "patch.jsonpatch",
        json=load_json("test-patch-replace-values-2.json"),
    )
    assert res.status_code == httpx.codes.FORBIDDEN
    assert res.headers["WWW-Authenticate"] == (
        'Bearer realm="PeriodO", error="insufficient_scope", '
        + "error_description="
        + '"The access token does not provide sufficient privileges", '
        + 'error_uri="http://tools.ietf.org/html/rfc6750#section-6.2.3"'
    )
@pytest.mark.client_auth_token("this-token-has-normal-permissions")
def test_creator_identity_update_patch(active_user, client, load_json):
    """The creator of a pending patch may replace its contents (200 OK)."""
    active_user
    # submit the patch
    res = client.patch("/d/", json=load_json("test-patch-replace-values-1.json"))
    # update the patch
    patch_url = urlparse(res.headers["Location"]).path
    res = client.put(
        patch_url + "patch.jsonpatch",
        json=load_json("test-patch-replace-values-2.json"),
    )
    assert res.status_code == httpx.codes.OK
@pytest.mark.client_auth_token("this-token-has-normal-permissions")
def test_creator_identity_update_merged_patch(
    admin_user,
    active_user,
    client,
    load_json,
    bearer_auth,
):
    """Even the creator may not update a patch once it has been merged (403)."""
    admin_user, active_user
    # submit the patch
    res = client.patch("/d/", json=load_json("test-patch-replace-values-1.json"))
    # merge the patch (as admin)
    patch_url = urlparse(res.headers["Location"]).path
    res = client.post(
        patch_url + "merge", auth=bearer_auth("this-token-has-admin-permissions")
    )
    # now try to update the patch (as original creator)
    res = client.put(
        patch_url + "patch.jsonpatch",
        json=load_json("test-patch-replace-values-2.json"),
    )
    assert res.status_code == httpx.codes.FORBIDDEN
    assert res.headers["WWW-Authenticate"] == (
        'Bearer realm="PeriodO", error="insufficient_scope", '
        + "error_description="
        + '"The access token does not provide sufficient privileges", '
        + 'error_uri="http://tools.ietf.org/html/rfc6750#section-6.2.3"'
    )
| 33.115789
| 81
| 0.66227
| 819
| 6,292
| 4.90232
| 0.148962
| 0.033624
| 0.029888
| 0.035866
| 0.835866
| 0.820922
| 0.789539
| 0.752927
| 0.710834
| 0.710834
| 0
| 0.007766
| 0.201844
| 6,292
| 189
| 82
| 33.291005
| 0.791716
| 0.059282
| 0
| 0.631944
| 0
| 0.027778
| 0.322269
| 0.125318
| 0
| 0
| 0
| 0
| 0.118056
| 1
| 0.0625
| false
| 0
| 0.034722
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9885afefa49d0b7e238aeedbd7a21ad99aab13e3
| 32
|
py
|
Python
|
nsedata/__init__.py
|
Codelif/nsedata
|
c2e4b2da3bd810c13fe13245d0ba6666cabe2583
|
[
"MIT"
] | null | null | null |
nsedata/__init__.py
|
Codelif/nsedata
|
c2e4b2da3bd810c13fe13245d0ba6666cabe2583
|
[
"MIT"
] | 1
|
2021-05-23T15:14:08.000Z
|
2021-05-23T15:14:08.000Z
|
nsedata/__init__.py
|
Codelif/nsedata
|
c2e4b2da3bd810c13fe13245d0ba6666cabe2583
|
[
"MIT"
] | null | null | null |
from nsedata.nsedata import Nse
| 16
| 31
| 0.84375
| 5
| 32
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
989cdd2c85443bed490122ae67c510db2a173357
| 33
|
py
|
Python
|
utils/__init__.py
|
clcert/beacon-verifier
|
7523756d84c309a01b3606b0602e8d082a47d867
|
[
"MIT"
] | 4
|
2018-09-04T17:45:52.000Z
|
2020-10-09T22:18:37.000Z
|
utils/__init__.py
|
clcert/beacon-verifier
|
7523756d84c309a01b3606b0602e8d082a47d867
|
[
"MIT"
] | 7
|
2018-07-12T18:32:01.000Z
|
2019-04-24T19:50:10.000Z
|
utils/__init__.py
|
clcert/beacon-verifier
|
7523756d84c309a01b3606b0602e8d082a47d867
|
[
"MIT"
] | null | null | null |
from utils.crypto.sloth import *
| 16.5
| 32
| 0.787879
| 5
| 33
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7f75e7aed7ded78b7d6dde1274708a4ae73bae84
| 13,575
|
py
|
Python
|
icem/environments/robotics.py
|
emiliojorge/iCEM
|
abf8e08e5993eaad2b61d7f56906808de964330a
|
[
"MIT"
] | 27
|
2020-11-17T17:59:43.000Z
|
2022-02-24T16:43:53.000Z
|
icem/environments/robotics.py
|
emiliojorge/iCEM
|
abf8e08e5993eaad2b61d7f56906808de964330a
|
[
"MIT"
] | 4
|
2021-02-04T04:40:43.000Z
|
2021-09-10T13:17:06.000Z
|
icem/environments/robotics.py
|
emiliojorge/iCEM
|
abf8e08e5993eaad2b61d7f56906808de964330a
|
[
"MIT"
] | 4
|
2021-03-17T16:12:14.000Z
|
2022-01-17T15:08:47.000Z
|
from gym.envs.robotics.fetch.pick_and_place import FetchPickAndPlaceEnv as FetchPickAndPlaceEnv_v1
from gym.envs.robotics.fetch.reach import FetchReachEnv
from gym.envs.robotics.robot_env import RobotEnv
from gym.utils import EzPickle
from .abstract_environments import *
class GymRoboticsGroundTruthSupportEnv(GroundTruthSupportEnv, RobotEnv, ABC):
    """ adds generic state operations for all Mujoco-based envs """

    # Class-level flag: has an offscreen GLFW context been created yet?
    window_exists = False

    # noinspection PyPep8Naming
    def set_GT_state(self, state):
        # Restore the full simulator state from a flattened vector, then
        # run forward() so derived quantities are consistent with it.
        self.sim.set_state_from_flattened(state.copy())
        self.sim.forward()

    # noinspection PyPep8Naming
    def get_GT_state(self):
        # Return the complete simulator state as a flat array.
        return self.sim.get_state().flatten()

    # noinspection PyMethodMayBeStatic
    def prepare_for_recording(self):
        # Lazily create an offscreen GLFW rendering context (needed for
        # headless rendering).  NOTE(review): the assignment below creates an
        # *instance* attribute shadowing the class-level flag, so each new
        # instance starts from the class default — presumably intentional,
        # but the context itself is process-wide; confirm if this matters.
        if not self.window_exists:
            from mujoco_py import GlfwContext
            GlfwContext(offscreen=True)
            self.window_exists = True
class FetchPickAndPlace(MaskedGoalSpaceEnvironmentInterface, GymRoboticsGroundTruthSupportEnv, FetchPickAndPlaceEnv_v1):
    """Fetch pick-and-place task whose observation is flattened to a single
    vector with the desired goal appended at the end.

    The goal is also appended to the ground-truth state so that
    set_GT_state/get_GT_state round-trip both the simulator state and the
    current goal.
    """

    def __init__(self, *, name, sparse, threshold, fixed_object_pos=None, fixed_goal=None,
                 shaped_reward=False, **kwargs):
        # fixed_object_pos / fixed_goal: optional deterministic overrides for
        # the randomized object start position and goal (fractions of the
        # respective ranges).  shaped_reward adds a gripper-to-box term to
        # the cost function.
        self.fixed_object_pos = fixed_object_pos
        self.fixed_goal = fixed_goal
        self.shaped_reward = shaped_reward
        FetchPickAndPlaceEnv_v1.__init__(self, **kwargs)
        GymRoboticsGroundTruthSupportEnv.__init__(self, name=name, **kwargs)
        self.store_init_arguments(locals())
        EzPickle.__init__(self, name=name, sparse=sparse, threshold=threshold,
                          **kwargs)  # needed to call make the pickling work with the args given

        assert (isinstance(self.observation_space, spaces.Dict))
        orig_obs_len = self.observation_space.spaces['observation'].shape[0]
        goal_space_size = self.observation_space.spaces['desired_goal'].shape[0]
        # Indices of the goal within the flattened observation (appended at
        # the end), and of the achieved goal (object position — assumed to
        # live at components 3..5 of the original observation).
        goal_idx = np.arange(orig_obs_len, orig_obs_len + goal_space_size)
        achieved_goal_idx = [3, 4, 5]
        # Replace the Dict space with a flat Box: observation + desired goal.
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(orig_obs_len + goal_space_size,), dtype='float32')
        MaskedGoalSpaceEnvironmentInterface.__init__(self, name=name, goal_idx=goal_idx,
                                                     achieved_goal_idx=achieved_goal_idx, sparse=sparse,
                                                     threshold=threshold)

    def _step_callback(self):
        self.sim.forward()  # we need to call forward because part of the model was overwritten and it is not consistent

    def get_pos_vel_of_joints(self, names):
        # Returns (positions, velocities) arrays for the named joints;
        # implicitly returns None when the sim has no qpos/joint names.
        if self.sim.data.qpos is not None and self.sim.model.joint_names:
            return (
                np.array([self.sim.data.get_joint_qpos(name) for name in names]),
                np.array([self.sim.data.get_joint_qvel(name) for name in names]),
            )

    def set_pos_vel_of_joints(self, names, q_pos, q_vel):
        # Write positions/velocities for the named joints into the simulator.
        if self.sim.data.qpos is not None and self.sim.model.joint_names:
            for n, p, v in zip(names, q_pos, q_vel):
                self.sim.data.set_joint_qpos(n, p)
                self.sim.data.set_joint_qvel(n, v)

    @staticmethod
    def flatten_observation(obs):
        # Dict observation -> flat vector: [observation | desired_goal].
        return np.concatenate((obs['observation'], obs['desired_goal']))

    def step(self, action):
        obs, reward, done, info = super().step(action)
        return self.flatten_observation(obs), reward, done, info

    def reset(self):
        # return self.flatten_observation(super().reset())
        # Attempt to reset the simulator. Since we randomize initial conditions, it
        # is possible to get into a state with numerical issues (e.g. due to penetration or
        # Gimbel lock) or we may not achieve an initial condition (e.g. an object is within the hand).
        # In this case, we just keep randomizing until we eventually achieve a valid initial
        # configuration.
        did_reset_sim = False
        while not did_reset_sim:
            did_reset_sim = self._reset_sim()
        self.goal = self._sample_goal().copy()
        obs = self._get_obs()
        return self.flatten_observation(obs)

    def get_GT_state(self):
        # Ground-truth state = simulator state + the 3-component goal.
        return np.concatenate((super().get_GT_state(), self.goal))

    def set_GT_state(self, state):
        # Split off the trailing 3 goal components before restoring the sim.
        mj_state = state[:-3]
        self.goal = state[-3:]
        super().set_GT_state(mj_state)

    def set_state_from_observation(self, observation):
        raise NotImplementedError("FetchPickAndPlace env needs the real GT states to be reset")

    def _reset_sim(self):
        self.sim.set_state(self.initial_state)

        # Randomize start position of object.
        if self.has_object:
            if self.fixed_object_pos is not None:
                # Deterministic object placement, scaled by obj_range.
                object_xpos = self.initial_gripper_xpos[:2] + np.asarray(self.fixed_object_pos) * self.obj_range
            else:
                # Re-sample until the object is at least 0.1 away from gripper.
                object_xpos = self.initial_gripper_xpos[:2]
                while np.linalg.norm(object_xpos - self.initial_gripper_xpos[:2]) < 0.1:
                    object_xpos = self.initial_gripper_xpos[:2] + self.np_random.uniform(-self.obj_range,
                                                                                        self.obj_range, size=2)
            object_qpos = self.sim.data.get_joint_qpos('object0:joint')
            assert object_qpos.shape == (7,)
            object_qpos[:2] = object_xpos
            self.sim.data.set_joint_qpos('object0:joint', object_qpos)

        self.sim.forward()
        return True

    def _sample_goal(self):
        if self.has_object:
            if self.fixed_goal is not None:
                # Deterministic goal, scaled by target_range.
                goal = self.initial_gripper_xpos[:3] + np.asarray(self.fixed_goal) * self.target_range
                goal += self.target_offset
                goal[2] = self.height_offset
                if self.target_in_the_air:
                    goal[2] += self.fixed_goal[2] * 0.45
            else:
                # Random goal; 50% chance of lifting it into the air.
                goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-self.target_range, self.target_range,
                                                                              size=3)
                goal += self.target_offset
                goal[2] = self.height_offset
                if self.target_in_the_air and self.np_random.uniform() < 0.5:
                    goal[2] += self.np_random.uniform(0, 0.45)
        else:
            goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-0.15, 0.15, size=3)
        return goal.copy()

    def cost_fn(self, observation, action, next_obs):
        # Cost based on box-to-goal distance; optionally shaped with a
        # (down-weighted) end-effector-to-box term.
        dist_box_to_goal = np.linalg.norm(self.goal_from_observation(observation) -
                                          self.achieved_goal_from_observation(observation), axis=-1)
        dist_end_eff_to_box = 0
        if self.shaped_reward:
            # assumes obs[:, :3] is the gripper and obs[:, 3:6] the box
            # position — TODO confirm against the underlying fetch env.
            dist_end_eff_to_box = np.linalg.norm(observation[:, :3] - observation[:, 3:6], axis=-1)
        if self.sparse:
            cost = np.asarray(dist_box_to_goal > self.threshold, dtype=np.float32) + \
                   np.asarray(dist_end_eff_to_box > self.threshold, dtype=np.float32) * 0.1
        else:
            cost = dist_box_to_goal + dist_end_eff_to_box * 0.1
        return cost

    def is_success(self, observation, action, next_obs):
        # Success = achieved goal within threshold of desired goal (per item).
        dist = np.linalg.norm(self.goal_from_observation(next_obs) -
                              self.achieved_goal_from_observation(next_obs), axis=-1)
        is_success = np.asarray(dist <= self.threshold, dtype=np.float32)
        return is_success
class FetchReach(MaskedGoalSpaceEnvironmentInterface, GymRoboticsGroundTruthSupportEnv, FetchReachEnv):
    """Fetch reach task with the goal appended to a flattened observation.

    Mirrors FetchPickAndPlace but for the object-free reach task: the
    achieved goal is the gripper position (components 0..2), and the
    ground-truth state carries the 3-component goal at its tail.
    """

    def __init__(self, *, name, sparse, threshold, fixed_goal=None,
                 **kwargs):
        # fixed_goal: optional deterministic override for the random goal.
        self.fixed_goal = fixed_goal
        FetchReachEnv.__init__(self, **kwargs)
        GymRoboticsGroundTruthSupportEnv.__init__(self, name=name, **kwargs)
        self.store_init_arguments(locals())
        EzPickle.__init__(self, name=name, sparse=sparse, threshold=threshold,
                          **kwargs)  # needed to call make the pickling work with the args given

        assert (isinstance(self.observation_space, spaces.Dict))
        orig_obs_len = self.observation_space.spaces['observation'].shape[0]
        self.goal_space_size = self.observation_space.spaces['desired_goal'].shape[0]
        # Goal lives at the end of the flattened observation; the achieved
        # goal is the gripper position in the first three components.
        goal_idx = np.arange(orig_obs_len, orig_obs_len + self.goal_space_size)
        achieved_goal_idx = [0, 1, 2]
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(orig_obs_len + self.goal_space_size,),
                                            dtype='float32')
        MaskedGoalSpaceEnvironmentInterface.__init__(self, name=name, goal_idx=goal_idx,
                                                     achieved_goal_idx=achieved_goal_idx, sparse=sparse,
                                                     threshold=threshold)

    def _step_callback(self):
        self.sim.forward()  # we need to call forward because part of the model was overwritten and it is not consistent

    def get_pos_vel_of_joints(self, names):
        # Returns (positions, velocities) arrays for the named joints;
        # implicitly returns None when the sim has no qpos/joint names.
        if self.sim.data.qpos is not None and self.sim.model.joint_names:
            return (
                np.array([self.sim.data.get_joint_qpos(name) for name in names]),
                np.array([self.sim.data.get_joint_qvel(name) for name in names]),
            )

    def set_pos_vel_of_joints(self, names, q_pos, q_vel):
        # Write positions/velocities for the named joints into the simulator.
        if self.sim.data.qpos is not None and self.sim.model.joint_names:
            for n, p, v in zip(names, q_pos, q_vel):
                self.sim.data.set_joint_qpos(n, p)
                self.sim.data.set_joint_qvel(n, v)

    @staticmethod
    def flatten_observation(obs):
        # Dict observation -> flat vector: [observation | desired_goal].
        return np.concatenate((obs['observation'], obs['desired_goal']))

    def step(self, action):
        obs, reward, done, info = super().step(action)
        return self.flatten_observation(obs), reward, done, info

    def reset(self):
        return self.flatten_observation(super().reset())

    def get_GT_state(self):
        # Ground-truth state = simulator state + the 3-component goal.
        return np.concatenate((super().get_GT_state(), self.goal))

    def set_GT_state(self, state):
        # Split off the trailing 3 goal components before restoring the sim.
        mj_state = state[:-3]
        self.goal = state[-3:]
        super().set_GT_state(mj_state)

    def set_state_from_observation(self, observation):
        # Bug fix: the message previously said "FetchPickAndPlace" — a
        # copy-paste leftover from the sibling class.
        raise NotImplementedError("FetchReach env needs the real GT states to be reset")

    def _reset_sim(self):
        self.sim.set_state(self.initial_state)

        # Randomize start position of object.  NOTE: FetchReach has no object
        # (has_object is False for the reach task), so this branch is dead
        # code inherited from FetchPickAndPlace; FetchReach never defines
        # fixed_object_pos, hence the getattr guard against AttributeError.
        if self.has_object:
            if getattr(self, 'fixed_object_pos', None) is not None:
                object_xpos = self.initial_gripper_xpos[:2] + np.asarray(self.fixed_object_pos) * self.obj_range
            else:
                object_xpos = self.initial_gripper_xpos[:2]
                while np.linalg.norm(object_xpos - self.initial_gripper_xpos[:2]) < 0.1:
                    object_xpos = self.initial_gripper_xpos[:2] + self.np_random.uniform(-self.obj_range,
                                                                                        self.obj_range, size=2)
            object_qpos = self.sim.data.get_joint_qpos('object0:joint')
            assert object_qpos.shape == (7,)
            object_qpos[:2] = object_xpos
            self.sim.data.set_joint_qpos('object0:joint', object_qpos)

        self.sim.forward()
        return True

    def _sample_goal(self):
        if self.has_object:
            if self.fixed_goal is not None:
                goal = self.initial_gripper_xpos[:3] + np.asarray(self.fixed_goal) * self.target_range
                goal += self.target_offset
                goal[2] = self.height_offset
                if self.target_in_the_air:
                    goal[2] += self.fixed_goal[2] * 0.45
            else:
                goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-self.target_range, self.target_range,
                                                                              size=3)
                goal += self.target_offset
                goal[2] = self.height_offset
                if self.target_in_the_air and self.np_random.uniform() < 0.5:
                    goal[2] += self.np_random.uniform(0, 0.45)
        else:
            # Object-free reach task: goal near the gripper, optionally fixed.
            if self.fixed_goal is not None:
                goal = self.initial_gripper_xpos[:3] + np.asarray(self.fixed_goal)
            else:
                goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-0.15, 0.15, size=3)
        return goal.copy()

    def cost_fn(self, observation, action, next_obs):
        # Cost based on gripper-to-goal distance (sparse: 0/1 past threshold).
        dist_gripper_to_goal = np.linalg.norm(self.goal_from_observation(observation) -
                                              self.achieved_goal_from_observation(observation), axis=-1)
        if self.sparse:
            cost = np.asarray(dist_gripper_to_goal > self.threshold, dtype=np.float32)
        else:
            cost = dist_gripper_to_goal
        return cost

    def is_success(self, observation, action, next_obs):
        # Success = achieved goal within threshold of desired goal (per item).
        dist = np.linalg.norm(self.goal_from_observation(next_obs) -
                              self.achieved_goal_from_observation(next_obs), axis=-1)
        is_success = np.asarray(dist <= self.threshold, dtype=np.float32)
        return is_success
if __name__ == '__main__':
    # Manual smoke test: render a FetchPickAndPlace env with a fixed goal
    # and object position, taking random actions.  Runs forever (Ctrl-C to
    # stop); requires a MuJoCo installation and a display/offscreen context.
    env = FetchPickAndPlace(name='blub', sparse=False, threshold=0.05, fixed_goal=[0.5, -0.3, 0.6],
                            fixed_object_pos=[0.85, 0.85])

    while True:
        env.reset()
        for _ in range(50):
            env.render()
            env.step(env.action_space.sample())
| 42.958861
| 120
| 0.61768
| 1,717
| 13,575
| 4.624345
| 0.131043
| 0.025567
| 0.022166
| 0.041562
| 0.794836
| 0.772166
| 0.757053
| 0.73728
| 0.73728
| 0.729345
| 0
| 0.014037
| 0.286262
| 13,575
| 315
| 121
| 43.095238
| 0.805449
| 0.066888
| 0
| 0.732456
| 0
| 0
| 0.022616
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 1
| 0.135965
| false
| 0
| 0.026316
| 0.026316
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7f900805da2f659cef185d62b249994806954cca
| 42
|
py
|
Python
|
Talos_Test/Talos_test/envs/__init__.py
|
zaceJin/Talos
|
209439bb254a6884c9f195e247ec5783404a8f6b
|
[
"MIT"
] | null | null | null |
Talos_Test/Talos_test/envs/__init__.py
|
zaceJin/Talos
|
209439bb254a6884c9f195e247ec5783404a8f6b
|
[
"MIT"
] | null | null | null |
Talos_Test/Talos_test/envs/__init__.py
|
zaceJin/Talos
|
209439bb254a6884c9f195e247ec5783404a8f6b
|
[
"MIT"
] | null | null | null |
from Talos_test.envs.Simple_Env import *
| 14
| 40
| 0.809524
| 7
| 42
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 2
| 41
| 21
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f68fb5118d4c711bc52cec7ebbc22a01e3e5f561
| 220
|
py
|
Python
|
giscube_search/settings.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 5
|
2018-06-07T12:54:35.000Z
|
2022-01-14T10:38:38.000Z
|
giscube_search/settings.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 140
|
2018-06-18T10:27:28.000Z
|
2022-03-23T09:53:15.000Z
|
giscube_search/settings.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T11:20:54.000Z
|
2021-04-13T11:20:54.000Z
|
from django.conf import settings

# Search dictionary name used when the Django project does not override it
# (presumably a full-text-search language dictionary — confirm in the search
# backend that consumes this setting).
GISCUBE_SEARCH_DEFAULT_DICTIONARY = getattr(settings, 'GISCUBE_SEARCH_DEFAULT_DICTIONARY', 'english')
# Cap on the number of search results; None means no explicit limit.
GISCUBE_SEARCH_MAX_RESULTS = getattr(settings, 'GISCUBE_SEARCH_MAX_RESULTS', None)
| 36.666667
| 101
| 0.85
| 27
| 220
| 6.481481
| 0.518519
| 0.297143
| 0.36
| 0.32
| 0.434286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 220
| 5
| 102
| 44
| 0.857843
| 0
| 0
| 0
| 0
| 0
| 0.3
| 0.268182
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f6eba0f6cca366fdd7f895e1ae24ced5de833d8e
| 194
|
py
|
Python
|
core/admin.py
|
jordij/menorkayak
|
b9b1a80230b111c2bd422de88215102a5f944fe6
|
[
"MIT"
] | 1
|
2017-04-25T10:17:52.000Z
|
2017-04-25T10:17:52.000Z
|
core/admin.py
|
jordij/menorkayak
|
b9b1a80230b111c2bd422de88215102a5f944fe6
|
[
"MIT"
] | null | null | null |
core/admin.py
|
jordij/menorkayak
|
b9b1a80230b111c2bd422de88215102a5f944fe6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from core.models import Day, Picture, Place, Point

# Expose the core models in the default Django admin with stock ModelAdmin
# options (no custom admin classes needed).
admin.site.register(Day)
admin.site.register(Picture)
admin.site.register(Place)
admin.site.register(Point)
| 21.555556
| 50
| 0.804124
| 29
| 194
| 5.37931
| 0.448276
| 0.230769
| 0.435897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087629
| 194
| 8
| 51
| 24.25
| 0.881356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f6fc125ad1488503f5a068a6401c1fc3e112f3da
| 29
|
py
|
Python
|
AC/hunter/__init__.py
|
imandr/KeRLas
|
8c347cbfea982f470372fb7cf8943f4d6bda8a8a
|
[
"BSD-3-Clause"
] | null | null | null |
AC/hunter/__init__.py
|
imandr/KeRLas
|
8c347cbfea982f470372fb7cf8943f4d6bda8a8a
|
[
"BSD-3-Clause"
] | null | null | null |
AC/hunter/__init__.py
|
imandr/KeRLas
|
8c347cbfea982f470372fb7cf8943f4d6bda8a8a
|
[
"BSD-3-Clause"
] | null | null | null |
from .hunter import HunterEnv
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
63dee7ce8586326e8ee39294742cc5bc14cd9a9b
| 18,208
|
py
|
Python
|
figs/plot.py
|
owensgroup/BGHT
|
0bddb60fe15fe50a23e640da1e87ad3906982a40
|
[
"Apache-2.0"
] | 8
|
2021-11-15T23:46:37.000Z
|
2022-03-24T15:54:20.000Z
|
figs/plot.py
|
owensgroup/BGHT
|
0bddb60fe15fe50a23e640da1e87ad3906982a40
|
[
"Apache-2.0"
] | 2
|
2021-12-09T10:14:44.000Z
|
2022-01-26T09:12:52.000Z
|
figs/plot.py
|
owensgroup/BGHT
|
0bddb60fe15fe50a23e640da1e87ad3906982a40
|
[
"Apache-2.0"
] | 2
|
2021-12-01T06:39:54.000Z
|
2022-03-05T18:47:29.000Z
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import scipy.stats
from matplotlib.offsetbox import AnchoredText
def remove_failed_experiments(df):
    """Return a copy of *df* with every negative cell replaced by NaN.

    Failed experiments are recorded as negative rates; mapping them to NaN
    lets later aggregation (dropna + hmean) and plotting skip them.
    """
    nan = float('nan')
    return df.applymap(lambda value: nan if value < 0 else value)
def print_summary(label, insert_col, find100_col, find50_col, find0_col):
    """Print one fixed-width summary row of harmonic-mean rates.

    NaN entries (failed experiments) are dropped before averaging.
    """
    hmean = scipy.stats.hmean
    mean_insertion_rate = hmean(insert_col.dropna())
    mean_find100_rate = hmean(find100_col.dropna())
    mean_find50_rate = hmean(find50_col.dropna())
    mean_find0_rate = hmean(find0_col.dropna())
    # Column widths match the header printed by the plot_* callers.
    print(
        "{0: <16}".format(label) + ' | ',
        "{0: <15}".format(round(mean_insertion_rate, 3)) + '|',
        "{0: <8}".format(round(mean_find100_rate, 3)) + ' |',
        "{0: <8}".format(round(mean_find50_rate, 3)) + ' |',
        "{0: <8}".format(round(mean_find0_rate, 3)),
    )
def plot_rates_fixed_lf(results_dir, output_dir, min_find, max_find, min_insert, max_insert, load_factor, probing='BCHT'):
    """Plot insert/find rates vs. millions of keys at a fixed load factor.

    Reads <results_dir>/rates_fixed_lf/<scheme>_rates_lfeq<load_factor>.csv,
    draws a 1x4 figure (insert rate + 100%/50%/0% positive queries), prints
    harmonic-mean summaries, and writes <scheme>_rates_lfeq<lf>.svg to
    *output_dir*.  The min_*/max_* axis bounds use -1 as "not set".
    Exits the process on an unknown probing scheme.
    """
    fmt = '.csv'
    if probing == 'BCHT':
        subdir = '/rates_fixed_lf/bcht_rates_lfeq'
        df = pd.read_csv(results_dir + subdir + str(load_factor) + fmt)
        svg_name = 'bcht_rates_lfeq' + str(load_factor)
        bucket_sizes = [1, 8, 16, 32]
    elif probing == 'P2BHT':
        subdir = '/rates_fixed_lf/p2bht_rates_lfeq'
        df = pd.read_csv(results_dir + subdir + str(load_factor) + fmt)
        svg_name = 'p2bht_rates_lfeq' + str(load_factor)
        bucket_sizes = [16, 32]
    elif probing == 'IHT':
        subdir = '/rates_fixed_lf/iht_rates_lfeq'
        df = pd.read_csv(results_dir + subdir + str(load_factor) + fmt)
        svg_name = 'iht_rates_lfeq' + str(load_factor)
        bucket_sizes = [16, 32]
    else:
        print("Unknown probing scheme")  # typo fix: was "Uknown"
        sys.exit()

    df = remove_failed_experiments(df)
    df['num_keys'] = df['num_keys'].divide(1.0e6)  # keys -> millions of keys

    scale = 5
    fig = plt.figure(figsize=(4 * scale, 1 * scale))
    # Invisible full-figure axis used only to carry the shared x-label.
    ax = fig.add_subplot(111)
    titles = ['100% Positive queries', '50% Positive queries', '0% Positive queries']
    subplots = [fig.add_subplot(141)]
    for i in range(2, 5):
        subplots.append(fig.add_subplot(1, 4, i))
        subplots[-1].title.set_text(titles[i - 2])
    for side in ('top', 'bottom', 'left', 'right'):
        ax.spines[side].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    markers = ['s', 'o', '^', 'D']

    print(probing + ': Fixed load factor = 0.' + str(load_factor) + ' summary:')
    print('Probing scheme | HMean insertion | HMean find 100')
    print(' | | 100% | 50% | 0%')
    if probing == 'BCHT' or probing == 'P2BHT':
        for b, m in zip(bucket_sizes, markers):
            insert_c = 'insert_' + str(b)
            find_c = 'find_' + str(b) + '_'
            l = probing + ', b=' + str(b)
            subplots[0].plot(df['num_keys'], df[insert_c], marker=m, label=l)
            subplots[1].plot(df['num_keys'], df[find_c + str(100)], marker=m)
            subplots[2].plot(df['num_keys'], df[find_c + str(50)], marker=m)
            subplots[3].plot(df['num_keys'], df[find_c + str(0)], marker=m)
            print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    elif probing == 'IHT':
        thresholds = [20, 40, 60, 80]
        for b in bucket_sizes:
            for t, m in zip(thresholds, markers):
                insert_c = 'insert_' + str(b) + '_' + str(t)
                find_c = 'find_' + str(b) + '_' + str(t) + '_'
                l = probing + ', b=' + str(b) + ', t=' + str(t) + '%'
                subplots[0].plot(df['num_keys'], df[insert_c], marker=m, label=l)
                subplots[1].plot(df['num_keys'], df[find_c + str(100)], marker=m)
                subplots[2].plot(df['num_keys'], df[find_c + str(50)], marker=m)
                subplots[3].plot(df['num_keys'], df[find_c + str(0)], marker=m)
                print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    print('--------------------------------------------------------')

    ax.set_xlabel('Millions of keys')
    subplots[0].set_ylabel('Insert Rate (MKey/s) ' + 'load factor = 0.' + str(load_factor))
    subplots[1].set_ylabel('Find Rate (MKey/s) ' + 'load factor = 0.' + str(load_factor))
    for p in subplots:
        p.spines["right"].set_visible(False)
        p.spines["top"].set_visible(False)
    # Bug fix: the max bounds were compared against 1 instead of the -1
    # sentinel (argparse default), so a limit of exactly 1 was ignored and a
    # max of -1 was accepted as a real bound.
    if min_insert != -1 and max_insert != -1:
        subplots[0].set_ylim([min_insert, max_insert])
    if min_find != -1 and max_find != -1:
        subplots[1].set_ylim([min_find, max_find])
        subplots[2].set_ylim([min_find, max_find])
        subplots[3].set_ylim([min_find, max_find])
    fig.tight_layout()
    fig.legend(bbox_to_anchor=(1, 0.9), frameon=False)
    fig.show()
    fig.savefig(output_dir + '/' + svg_name + '.svg', bbox_inches='tight')
def plot_rates_fixed_keys(results_dir, output_dir, min_find, max_find, min_insert, max_insert, probing='BCHT'):
    """Plot insert/find rates vs. load factor for a fixed number of keys.

    Reads <results_dir>/rates_fixed_keys/<scheme>_rates_fixed_keys.csv,
    draws a 1x4 figure (insert rate + 100%/50%/0% positive queries), prints
    harmonic-mean summaries, and writes <scheme>_rates_fixed_keys.svg to
    *output_dir*.  The min_*/max_* axis bounds use -1 as "not set".
    Exits the process on an unknown probing scheme.
    """
    if probing == 'BCHT':
        df = pd.read_csv(results_dir + '/rates_fixed_keys/bcht_rates_fixed_keys.csv')
        svg_name = 'bcht_rates_fixed_keys'
        bucket_sizes = [1, 8, 16, 32]
    elif probing == 'P2BHT':
        df = pd.read_csv(results_dir + '/rates_fixed_keys/p2bht_rates_fixed_keys.csv')
        svg_name = 'p2bht_rates_fixed_keys'
        bucket_sizes = [16, 32]
    elif probing == 'IHT':
        df = pd.read_csv(results_dir + '/rates_fixed_keys/iht_rates_fixed_keys.csv')
        svg_name = 'iht_rates_fixed_keys'
        bucket_sizes = [16, 32]
    else:
        print("Unknown probing scheme")  # typo fix: was "Uknown"
        sys.exit()

    df = remove_failed_experiments(df)

    scale = 5
    fig = plt.figure(figsize=(4 * scale, 1 * scale))
    # Invisible full-figure axis used only to carry the shared x-label.
    ax = fig.add_subplot(111)
    subplots = [fig.add_subplot(141)]
    titles = ['100% Positive queries', '50% Positive queries', '0% Positive queries']
    for i in range(2, 5):
        subplots.append(fig.add_subplot(1, 4, i))
        subplots[-1].title.set_text(titles[i - 2])
    for side in ('top', 'bottom', 'left', 'right'):
        ax.spines[side].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    markers = ['s', 'o', '^', 'D']
    min_x_axis = 0.6
    max_x_axis = 1.0

    print(probing + ': Fixed number of keys summary:')
    print('Probing scheme | HMean insertion | HMean find 100')
    print(' | | 100% | 50% | 0%')
    if probing == 'BCHT' or probing == 'P2BHT':
        for b, m in zip(bucket_sizes, markers):
            insert_c = 'insert_' + str(b)
            find_c = 'find_' + str(b) + '_'
            l = probing + ', b=' + str(b)
            subplots[0].plot(df['load_factor'], df[insert_c], marker=m, label=l)
            subplots[1].plot(df['load_factor'], df[find_c + str(100)], marker=m)
            subplots[2].plot(df['load_factor'], df[find_c + str(50)], marker=m)
            subplots[3].plot(df['load_factor'], df[find_c + str(0)], marker=m)
            print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    elif probing == 'IHT':
        thresholds = [20, 40, 60, 80]
        for b in bucket_sizes:
            for t, m in zip(thresholds, markers):
                insert_c = 'insert_' + str(b) + '_' + str(t)
                find_c = 'find_' + str(b) + '_' + str(t) + '_'
                l = probing + ', b=' + str(b) + ', t=' + str(t) + '%'
                subplots[0].plot(df['load_factor'], df[insert_c], marker=m, label=l)
                subplots[1].plot(df['load_factor'], df[find_c + str(100)], marker=m)
                subplots[2].plot(df['load_factor'], df[find_c + str(50)], marker=m)
                subplots[3].plot(df['load_factor'], df[find_c + str(0)], marker=m)
                print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    print('--------------------------------------------------------')

    ax.set_xlabel('Load factor')
    subplots[0].set_ylabel('Insert Rate (MKey/s)')
    subplots[1].set_ylabel('Find Rate (MKey/s)')
    for p in subplots:
        p.spines["right"].set_visible(False)
        p.spines["top"].set_visible(False)
    # Bug fix: the max bounds were compared against 1 instead of the -1
    # sentinel (argparse default) — same typo as plot_rates_fixed_lf.
    if min_insert != -1 and max_insert != -1:
        subplots[0].set_ylim([min_insert, max_insert])
    if min_find != -1 and max_find != -1:
        subplots[1].set_ylim([min_find, max_find])
        subplots[2].set_ylim([min_find, max_find])
        subplots[3].set_ylim([min_find, max_find])
    for ax in fig.get_axes():
        ax.set_xlim([min_x_axis, max_x_axis])
    fig.tight_layout()
    fig.legend(bbox_to_anchor=(1, 0.9), frameon=False)
    fig.show()
    fig.savefig(output_dir + '/' + svg_name + '.svg', bbox_inches='tight')
def plot_avg_probes_fixed_keys(results_dir, output_dir, probing='BCHT'):
    """Plot average probes per key vs. load factor for one probing scheme.

    Reads <results_dir>/avg_probes/<scheme>_probes.csv, draws a 1x4 figure
    (insert + 100%/50%/0% positive queries), prints harmonic-mean summaries,
    and writes <svg_name>.svg to *output_dir*.  Axes are fixed to
    load factor 0.6-1.0 and 1-4 probes.  Exits on an unknown scheme.
    """
    if probing == 'BCHT':
        df = pd.read_csv(results_dir + '/avg_probes/bcht_probes.csv')
        svg_name = 'bcht_probes'
        bucket_sizes = [1, 8, 16, 32]
    elif probing == 'P2BHT':
        df = pd.read_csv(results_dir + '/avg_probes/p2bht_probes.csv')
        # NOTE(review): output name 'p2ht_probes' is missing the 'b' present
        # in the input name 'p2bht_probes' — looks like a typo, but other
        # tooling may depend on this filename, so it is left unchanged.
        svg_name = 'p2ht_probes'
        bucket_sizes = [16, 32]
    elif probing == 'IHT':
        df = pd.read_csv(results_dir + '/avg_probes/iht_probes.csv')
        svg_name = 'iht_probes'
        bucket_sizes = [16, 32]
    else:
        print("Unknown probing scheme")  # typo fix: was "Uknown"
        sys.exit()

    df = remove_failed_experiments(df)

    scale = 5
    fig = plt.figure(figsize=(4 * scale, 1 * scale))
    # Invisible full-figure axis used only to carry the shared x-label.
    ax = fig.add_subplot(111)
    titles = ['100% Positive queries', '50% Positive queries', '0% Positive queries']
    subplots = [fig.add_subplot(141)]
    for i in range(2, 5):
        subplots.append(fig.add_subplot(1, 4, i))
        subplots[-1].title.set_text(titles[i - 2])
    for side in ('top', 'bottom', 'left', 'right'):
        ax.spines[side].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    markers = ['s', 'o', '^', 'D']
    min_x_axis = 0.6
    max_x_axis = 1.0
    min_y_axis = 1.0
    max_y_axis = 4.0

    print(probing + ': average probes summary:')
    print('Probing scheme | HMean insertion | HMean find 100')
    print(' | | 100% | 50% | 0%')
    if probing == 'BCHT' or probing == 'P2BHT':
        for b, m in zip(bucket_sizes, markers):
            insert_c = 'insert_' + str(b)
            find_c = 'find_' + str(b) + '_'
            l = probing + ', b=' + str(b)
            subplots[0].plot(df['load_factor'], df[insert_c], marker=m, label=l)
            subplots[1].plot(df['load_factor'], df[find_c + str(100)], marker=m)
            subplots[2].plot(df['load_factor'], df[find_c + str(50)], marker=m)
            subplots[3].plot(df['load_factor'], df[find_c + str(0)], marker=m)
            print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    elif probing == 'IHT':
        thresholds = [20, 40, 60, 80]
        for b in bucket_sizes:
            for t, m in zip(thresholds, markers):
                insert_c = 'insert_' + str(b) + '_' + str(t)
                find_c = 'find_' + str(b) + '_' + str(t) + '_'
                l = probing + ', b=' + str(b) + ', t=' + str(t) + '%'
                subplots[0].plot(df['load_factor'], df[insert_c], marker=m, label=l)
                subplots[1].plot(df['load_factor'], df[find_c + str(100)], marker=m)
                subplots[2].plot(df['load_factor'], df[find_c + str(50)], marker=m)
                subplots[3].plot(df['load_factor'], df[find_c + str(0)], marker=m)
                print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    print('--------------------------------------------------------')

    ax.set_xlabel('Load factor')
    subplots[0].set_ylabel('Average number of probes per key')
    for p in subplots:
        p.spines["right"].set_visible(False)
        p.spines["top"].set_visible(False)
    for ax in fig.get_axes():
        ax.set_xlim([min_x_axis, max_x_axis])
        ax.set_ylim([min_y_axis, max_y_axis])
    fig.tight_layout()
    fig.legend(bbox_to_anchor=(1, 0.9), frameon=False)
    fig.show()
    fig.savefig(output_dir + '/' + svg_name + '.svg', bbox_inches='tight')
def plot_avg_probes_fixed_keys_best(dfs, xcol, output_dir, svg_name, x_title, y_title):
    # Plot the "best" configuration of each probing scheme side by side.
    # dfs: list of four DataFrames aligned with best_prefix/best_suffix below;
    # xcol: column used for the x axis (e.g. 'load_factor' or 'num_keys').
    scale=5
    subplots=[]
    fig = plt.figure(figsize=(4*scale,1*scale))
    # Invisible full-figure axis used only to carry the shared x-label.
    ax = fig.add_subplot(111)
    titles = ['100% Positive queries', '50% Positive queries', '0% Positive queries']
    subplots.append(fig.add_subplot(141))
    for i in range(2, 5):
        subplots.append(fig.add_subplot(1, 4, i))
        subplots[-1].title.set_text(titles[i-2])
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
    markers =['s', 'o', '^', 'D']
    print('Best ' + svg_name + ' summary:')
    print('Probing scheme | HMean insertion | HMean find 100')
    print(' | | 100% | 50% | 0%')
    # Hand-picked best configuration per scheme; suffixes select the CSV
    # columns (bucket size, and threshold for IHT).
    # NOTE(review): 'PB2HT' looks like a typo for 'P2BHT' (spelling used
    # everywhere else in this file) — confirm before changing legend output.
    best_prefix = ['CHT' , 'BCHT', 'PB2HT', 'IHT']
    best_suffix = ['1','16', '32', '32_80']
    for df, s, p, m in zip(dfs, best_suffix, best_prefix, markers):
        insert_c = 'insert_' + s
        find_c = 'find_' + s + '_'
        # NOTE(review): label is suffix+prefix (e.g. '1CHT', '16BCHT') —
        # presumably prefix-then-suffix was intended; verify against the
        # published figures before changing.
        l = s + p
        subplots[0].plot(df[xcol], df[insert_c], marker = m, label=l)
        subplots[1].plot(df[xcol], df[find_c + str(100)], marker = m)
        subplots[2].plot(df[xcol], df[find_c + str(50)], marker = m)
        subplots[3].plot(df[xcol], df[find_c + str(0)], marker = m)
        print_summary(l, df[insert_c], df[find_c + str(100)], df[find_c + str(50)], df[find_c + str(0)])
    print('--------------------------------------------------------')
    ax.set_xlabel(x_title)
    subplots[0].set_ylabel(y_title)
    for p in subplots:
        p.spines["right"].set_visible(False)
        p.spines["top"].set_visible(False)
    fig.tight_layout()
    fig.legend(bbox_to_anchor = (1, 0.9), frameon=False)
    fig.show()
    fig.savefig(output_dir + '/' + svg_name + '.svg',bbox_inches='tight')
def plot_best(results_dir, output_dir):
    """Generate the 'best configuration' comparison plots.

    For each metric (rates vs. load factor, rates vs. number of keys,
    probe counts) load the per-scheme result CSVs, drop failed
    experiments, and hand the frames to the comparison plotter.

    Fixes:
    - the first frame previously re-read ``bcht`` instead of ``cht``, so
      the CHT series silently duplicated the BCHT data (the companion
      plot labels the four frames CHT/BCHT/PB2HT/IHT);
    - path construction was inconsistent (the first two reads omitted the
      '/' separator between results_dir and the metric directory);
    - a fifth, never-used DataFrame was allocated.
    """
    svg_names = ['rates_fixed_keys', 'rates_fixed_lf', 'avg_probes']
    csv_names = ['_rates_fixed_keys', '_rates_lfeq90', '_probes']
    cols = ['load_factor', 'num_keys', 'load_factor']
    titles_x = ['Load factor', 'Number of keys', 'Load Factor']
    titles_y = ['Rate (MOperation/s)', 'Rate (MOperation/s)',
                'Average number of probes per key']
    # One CSV per probing scheme, in the order the plotter labels them.
    schemes = ['cht', 'bcht', 'p2bht', 'iht']
    for s, csv, col, tx, ty in zip(svg_names, csv_names, cols, titles_x, titles_y):
        dfs = [
            remove_failed_experiments(
                pd.read_csv(results_dir + '/' + s + '/' + scheme + csv + '.csv'))
            for scheme in schemes
        ]
        plot_avg_probes_fixed_keys_best(dfs, col, output_dir, s, tx, ty)
if __name__ == "__main__":
    # CLI entry point: read benchmark results and emit the SVG plots.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dir')
    parser.add_argument('-od', '--output-dir', default='')
    parser.add_argument('-mf', '--min-find-throughput', default=-1, type=int)
    parser.add_argument('-xf', '--max-find-throughput', default=-1, type=int)
    parser.add_argument('-mi', '--min-insert-throughput', default=-1, type=int)
    parser.add_argument('-xi', '--max-insert-throughput', default=-1, type=int)
    parser.add_argument('-p', '--probing-scheme', default='all')
    probing_schemes = ['BCHT', 'P2BHT', 'IHT']
    args = parser.parse_args()
    print("Reading results from: ", args.dir)
    load_factors = [80, 90]
    # Fix: the 'all' and single-scheme branches duplicated the same three
    # plotting calls; select the scheme list once and loop a single time.
    if args.probing_scheme == 'all':
        plot_best(args.dir, args.output_dir)
        schemes = probing_schemes
    else:
        schemes = [args.probing_scheme]
    for p in schemes:
        # Plotting rates vs. load factor
        plot_rates_fixed_keys(args.dir, args.output_dir,
                              args.min_find_throughput, args.max_find_throughput,
                              args.min_insert_throughput, args.max_insert_throughput, p)
        # Plotting rates vs. number of keys
        for lf in load_factors:
            plot_rates_fixed_lf(args.dir, args.output_dir,
                                args.min_find_throughput, args.max_find_throughput,
                                args.min_insert_throughput, args.max_insert_throughput, lf, p)
        # Plotting probes count vs. load factor
        plot_avg_probes_fixed_keys(args.dir, args.output_dir, p)
| 42.44289
| 124
| 0.586171
| 2,601
| 18,208
| 3.878893
| 0.080738
| 0.024284
| 0.029141
| 0.04163
| 0.822579
| 0.810388
| 0.791159
| 0.767866
| 0.750223
| 0.711765
| 0
| 0.031468
| 0.237313
| 18,208
| 428
| 125
| 42.542056
| 0.695039
| 0.011259
| 0
| 0.692529
| 0
| 0
| 0.164564
| 0.03657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020115
| false
| 0
| 0.020115
| 0
| 0.043103
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
63ed40bbe3b9d11c423a800c85e3139434d80b7b
| 11,156
|
py
|
Python
|
network/openpose/CMUnet_loss.py
|
H-Liu1997/Pytorch_Pose_Estimation_Framework
|
06616b3459ff639f8486e6ea4f93922597788b2a
|
[
"MIT"
] | 1
|
2019-09-04T11:52:26.000Z
|
2019-09-04T11:52:26.000Z
|
network/openpose/CMUnet_loss.py
|
HaiyangLiu1997/Pytorch_Pose_Estimation_Framework
|
06616b3459ff639f8486e6ea4f93922597788b2a
|
[
"MIT"
] | null | null | null |
network/openpose/CMUnet_loss.py
|
HaiyangLiu1997/Pytorch_Pose_Estimation_Framework
|
06616b3459ff639f8486e6ea4f93922597788b2a
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# define the CMUnet loss calculation
# Written by Haiyang Liu (haiyangliu1997@gmail.com)
# ------------------------------------------------------------------------------
import torch
import torch.nn as nn
def loss_cli(parser, name):
    """Register loss-related CLI options on *parser*.

    Args:
        parser: an ``argparse.ArgumentParser`` to extend.
        name: loss name, echoed for logging only.

    Fix: argparse's ``type=bool`` treats ANY non-empty string -- including
    "False" -- as True.  Parse the string explicitly so "--auto_weight
    False" actually yields False.
    """
    print('using', name, 'loss success')
    group = parser.add_argument_group('loss')
    group.add_argument('--auto_weight', default=False,
                       type=lambda v: str(v).lower() in ('1', 'true', 'yes'))
def get_offset_loss(saved_for_loss,target_heat,heat_mask,target_paf,paf_mask,target_offset,args,epoch):
    """Combined PAF / heatmap / offset loss for one batch.

    Args:
        saved_for_loss: per-stage network outputs; later stages carry 19
            heatmap channels followed by offset channels.
        target_heat, heat_mask: heatmap targets and validity mask.
        target_paf, paf_mask: PAF targets and validity mask.
        target_offset: regression targets for the offset channels.
        args: namespace providing ``batch_size`` and ``paf_stage``.
        epoch: current epoch; the offset term is only added once epoch > 4.

    Returns:
        dict mapping 'stage_i' to each stage's loss plus 'final' (the sum).
    """
    loss = {}
    loss['final'] = 0
    batch_size = args.batch_size
    criterion = My_loss().cuda()
    criterion_offset = My_loss_offset().cuda()
    # Detach the last stage's first 19 channels (presumably the keypoint
    # heatmaps -- TODO confirm) so they can be reused as a fixed weighting
    # mask for the offset loss without backpropagating through them.
    heat_output = saved_for_loss[-1][:,:19,:,:].detach()
    #print(heat_output.requires_grad,1)
    heat_output_copy = torch.zeros(heat_output.shape,requires_grad=False)
    #print(heat_output_copy.requires_grad,2)
    heat_output_copy.copy_(heat_output)
    #print(heat_output_copy.requires_grad,3)
    heat_output_copy = heat_output_copy.cuda()
    #print(heat_output_copy.requires_grad,4)
    # Duplicate every heatmap channel so the weighting mask lines up with
    # the interleaved pairs of offset channels (2*i and 2*i+1 per keypoint).
    heat_output_copy_final = torch.zeros([heat_output.shape[0],heat_output.shape[1]*2,
                                          heat_output.shape[2],heat_output.shape[3]],requires_grad=False)
    #print(heat_output_copy_final.requires_grad,5)
    for i in range(heat_output.shape[1]):
        heat_output_copy_final[:,2*i,:,:] = heat_output_copy[:,i,:,:]
        heat_output_copy_final[:,2*i+1,:,:] = heat_output_copy[:,i,:,:]
    heat_output_copy_final = heat_output_copy_final.cuda()
    #print(heat_output_copy_final.requires_grad,6)
    #heat_output_copy_final.requires_grad = False
    #heat_output_copy.requires_grad = False
    #heat_output.requires_grad = False
    #print(heat_output_copy_final.requires_grad,7)
    #print(heat_output_copy.requires_grad,8)
    #print(heat_output.requires_grad,9)
    # for debug
    # print(target_heat.size())
    # print(heat_mask.size())
    # print(target_paf.size())
    # print(paf_mask.size())
    # print(saved_for_loss[0].size())
    # print(saved_for_loss[1].size())
    # Early stages regress PAFs only (masked SSE).
    for i in range(args.paf_stage):
        loss['stage_{}'.format(i)] = criterion(saved_for_loss[i] * paf_mask,target_paf * paf_mask,batch_size)
        loss['final'] += loss['stage_{}'.format(i)]
    # Later stages regress heatmaps (channels :19) and, after a 4-epoch
    # warm-up, the offset channels (19:) weighted by the detached heatmaps.
    for i in range(args.paf_stage,6):
        loss_a = criterion(saved_for_loss[i][:,:19,:,:] * heat_mask,target_heat * heat_mask,batch_size)
        loss_b = criterion_offset(saved_for_loss[i][:,19:,:,:], heat_output_copy_final,target_offset,batch_size)
        if epoch > 4:
            loss['stage_{}'.format(i)] = loss_a + loss_b
        else:
            loss['stage_{}'.format(i)] = loss_a
        loss['final'] += loss['stage_{}'.format(i)]
    '''for i in range(6,7):
        loss['stage_{}'.format(i)] = criterion_offset(saved_for_loss[-1], heat_output_copy_final,target_offset,batch_size)
        if epoch > 4:
            loss['final'] += loss['stage_{}'.format(i)]'''
    return loss
def get_loss(saved_for_loss,target_heat,target_paf,args,wei_con):
    """Per-stage, per-channel MSE between predictions and PAF/heatmap targets.

    When ``args.auto_weight`` is true, each channel's loss is scaled by the
    corresponding entry of ``wei_con`` (copied into row 0 of ``weights``);
    otherwise every channel contributes equally.  PAF stages are averaged
    over 38 channels and heatmap stages over 19 (hard-coded divisors).

    Returns:
        dict of per-stage ('stage_i') and per-channel ('stage_i_j') losses
        plus their sum under 'final'.
    """
    loss = {}
    length = len(saved_for_loss)
    loss['final'] = 0
    # weights has one row per stage but only row 0 is ever written or read.
    weights = torch.ones([6,args.paf_num+args.heatmap_num])
    #print("weights size",weights.size())
    #print("weigcon size",wei_con.size())
    if args.auto_weight == True:
        for i in range(args.paf_num+args.heatmap_num):
            weights[0][i] = wei_con[0][i]
    weights = weights.cuda()
    # NOTE(review): ``size_average`` is deprecated in modern PyTorch; the
    # equivalent spelling is ``reduction='mean'``.
    criterion = nn.MSELoss(size_average=True).cuda()
    if args.auto_weight == True:
        # Weighted variant: scale each channel loss by weights[0][channel].
        for i in range(args.paf_stage):
            loss['stage_{}'.format(i)] = 0
            for j in range(args.paf_num):
                #print(saved_for_loss[i].size(),target_paf.size())
                #print(weights.size())
                #loss['stage_{0}_{1}'.format(i,j)] = criterion(saved_for_loss[i][:,j,:,:],target_paf[:,j,:,:])
                loss['stage_{0}_{1}'.format(i,j)] = criterion(saved_for_loss[i][:,j,:,:],target_paf[:,j,:,:]) * weights[0][j]
                loss['stage_{}'.format(i)] += loss['stage_{0}_{1}'.format(i,j)]
            loss['stage_{}'.format(i)] /= 38
            loss['final'] += loss['stage_{}'.format(i)]
        for i in range(args.paf_stage,length):
            loss['stage_{}'.format(i)] = 0
            for j in range(args.heatmap_num):
                loss['stage_{0}_{1}'.format(i,j)] = criterion(saved_for_loss[i][:,j,:,:],target_heat[:,j,:,:]) * weights[0][j+args.paf_num]
                loss['stage_{}'.format(i)] += loss['stage_{0}_{1}'.format(i,j)]
            loss['stage_{}'.format(i)] /= 19
            loss['final'] += loss['stage_{}'.format(i)]
    else:
        # Unweighted variant: identical structure without the weight factor.
        for i in range(args.paf_stage):
            loss['stage_{}'.format(i)] = 0
            for j in range(args.paf_num):
                #print(saved_for_loss[i].size(),target_paf.size())
                #print(weights.size())
                loss['stage_{0}_{1}'.format(i,j)] = criterion(saved_for_loss[i][:,j,:,:],target_paf[:,j,:,:])
                loss['stage_{}'.format(i)] += loss['stage_{0}_{1}'.format(i,j)]
            loss['stage_{}'.format(i)] /= 38
            loss['final'] += loss['stage_{}'.format(i)]
        for i in range(args.paf_stage,length):
            loss['stage_{}'.format(i)] = 0
            for j in range(args.heatmap_num):
                loss['stage_{0}_{1}'.format(i,j)] = criterion(saved_for_loss[i][:,j,:,:],target_heat[:,j,:,:])
                loss['stage_{}'.format(i)] += loss['stage_{0}_{1}'.format(i,j)]
            loss['stage_{}'.format(i)] /= 19
            loss['final'] += loss['stage_{}'.format(i)]
    return loss
class My_loss(nn.Module):
    """Sum-of-squared-errors, divided by batch size and halved."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y, batch_size):
        # Equivalent to torch.sum(torch.pow(x - y, 2)) / batch_size / 2.
        return (x - y).pow(2).sum() / batch_size / 2
class My_loss_focus(nn.Module):
    """Quartic (fourth-power) error summed over elements, per batch item."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y, batch_size):
        # The fourth power penalizes large residuals far more than MSE.
        return (x - y).pow(4).sum() / batch_size
class My_loss_focus2(nn.Module):
    """Log-compressed absolute error: sum(log1p(|x - y|)) / batch / 4."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y, batch_size):
        # log1p grows slowly, so outliers contribute sub-linearly.
        return (x - y).abs().log1p().sum() / batch_size / 4
class My_loss2(nn.Module):
    """Per-element weighted squared error: sum((x - y)^2 * mask) / batch / 2."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y, batch_size, mask):
        weighted_sq_err = (x - y).pow(2) * mask
        return weighted_sq_err.sum() / batch_size / 2
class My_loss_offset(nn.Module):
    """Masked squared error for offset regression.

    Note the argument order (x, mask, y): mask weights the squared
    residual; abs() guards against negative mask values.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, mask, y, batch_size):
        masked = (x - y).pow(2) * mask
        return masked.abs().sum() / batch_size / 2
def get_old_loss(saved_for_loss, target_heat, target_paf, args, wei_con):
    """Original two-branch CMU loss: per-stage mean-squared error.

    ``saved_for_loss`` alternates PAF predictions (even indices) and
    heatmap predictions (odd indices) for 6 stages.  ``args`` and
    ``wei_con`` are accepted for signature compatibility but unused.

    Fixes:
    - ``nn.MSELoss`` takes (input, target); the extra ``batch_size``
      argument made every call raise a TypeError.
    - ``size_average=True`` is deprecated; ``reduction='mean'`` is the
      modern equivalent.

    Returns:
        dict of per-stage losses plus their sum under 'final'.
    """
    loss = {}
    loss['final'] = 0
    criterion = nn.MSELoss(reduction='mean')
    for i in range(6):
        loss['stage_1_{}'.format(i)] = criterion(saved_for_loss[2 * i], target_paf)
        loss['stage_2_{}'.format(i)] = criterion(saved_for_loss[2 * i + 1], target_heat)
        loss['final'] += loss['stage_1_{}'.format(i)]
        loss['final'] += loss['stage_2_{}'.format(i)]
    return loss
def get_mask_loss(saved_for_loss, target_heat, heat_mask, target_paf, paf_mask, args, wei_con):
    """Masked SSE loss for the two-branch network layout.

    Even entries of ``saved_for_loss`` are PAF predictions and odd entries
    heatmap predictions, for 6 stages each.  Masks zero out invalid pixels
    on both prediction and target before the criterion is applied.
    """
    batch_size = args.batch_size
    criterion = My_loss().cuda()
    loss = {'final': 0}
    for stage in range(6):
        paf_key = 'stage_1_{}'.format(stage)
        heat_key = 'stage_2_{}'.format(stage)
        loss[paf_key] = criterion(saved_for_loss[2 * stage] * paf_mask,
                                  target_paf * paf_mask, batch_size)
        loss[heat_key] = criterion(saved_for_loss[2 * stage + 1] * heat_mask,
                                   target_heat * heat_mask, batch_size)
        loss['final'] = loss['final'] + loss[paf_key] + loss[heat_key]
    return loss
def get_old_loss(saved_for_loss, target_heat, target_paf, args, wei_con):
    """Original two-branch CMU loss: per-stage mean-squared error.

    NOTE(review): this is a verbatim duplicate definition that shadows the
    earlier ``get_old_loss`` in this module; consider deleting one copy.

    Fixes (same as the first copy):
    - ``nn.MSELoss`` takes (input, target); the extra ``batch_size``
      argument made every call raise a TypeError.
    - ``size_average=True`` is deprecated; use ``reduction='mean'``.

    Returns:
        dict of per-stage losses plus their sum under 'final'.
    """
    loss = {}
    loss['final'] = 0
    criterion = nn.MSELoss(reduction='mean')
    for i in range(6):
        loss['stage_1_{}'.format(i)] = criterion(saved_for_loss[2 * i], target_paf)
        loss['stage_2_{}'.format(i)] = criterion(saved_for_loss[2 * i + 1], target_heat)
        loss['final'] += loss['stage_1_{}'.format(i)]
        loss['final'] += loss['stage_2_{}'.format(i)]
    return loss
def get_mask_loss_self(saved_for_loss,target_heat,heat_mask,target_paf,paf_mask_self,args,wei_con):
    """Masked SSE loss with an additional self-supplied per-pixel PAF weight.

    ``paf_mask_self`` appears to pack two masks along its leading axis:
    [:19] multiplies predictions and targets (validity mask) while [19:]
    is passed to ``My_loss2`` as a per-pixel weighting -- assumes the
    tensor is laid out (mask || weight); TODO confirm against the loader.

    Returns:
        dict of per-stage losses plus their sum under 'final'.
    """
    loss = {}
    loss['final'] = 0
    batch_size = args.batch_size
    criterion = My_loss().cuda()
    criterion2 = My_loss2().cuda()
    # Extra scale on the PAF term; currently a no-op (1).
    factors = 1
    # for debug
    # print(target_heat.size())
    # print(heat_mask.size())
    # print(target_paf.size())
    # print(paf_mask.size())
    # print(saved_for_loss[0].size())
    # print(saved_for_loss[1].size())
    for i in range(6):
        loss['stage_1_{}'.format(i)] = factors * criterion2(saved_for_loss[2*i] * paf_mask_self[:19,:,:],target_paf * paf_mask_self[:19,:,:],batch_size,paf_mask_self[19:,:,:])
        loss['stage_2_{}'.format(i)] = criterion(saved_for_loss[2*i+1] * heat_mask,target_heat * heat_mask,batch_size)
        loss['final'] += loss['stage_1_{}'.format(i)]
        loss['final'] += loss['stage_2_{}'.format(i)]
    return loss
def get_new_mask_loss(saved_for_loss, target_heat, heat_mask, target_paf, paf_mask, args, wei_con):
    """Masked SSE loss for the single-branch network layout.

    Stages [0, args.paf_stage) are scored against the PAF targets; the
    remaining stages up to 6 against the heatmap targets.
    """
    batch_size = args.batch_size
    criterion = My_loss().cuda()
    loss = {'final': 0}
    for stage in range(args.paf_stage):
        key = 'stage_{}'.format(stage)
        loss[key] = criterion(saved_for_loss[stage] * paf_mask,
                              target_paf * paf_mask, batch_size)
        loss['final'] = loss['final'] + loss[key]
    for stage in range(args.paf_stage, 6):
        key = 'stage_{}'.format(stage)
        loss[key] = criterion(saved_for_loss[stage] * heat_mask,
                              target_heat * heat_mask, batch_size)
        loss['final'] = loss['final'] + loss[key]
    return loss
def get_new_focus_loss(saved_for_loss, target_heat, heat_mask, target_paf, paf_mask, args, wei_con):
    """Masked log-compressed loss (My_loss_focus2) for the single-branch layout.

    Same stage split as ``get_new_mask_loss``: PAF stages first, heatmap
    stages after ``args.paf_stage``, six stages total.
    """
    batch_size = args.batch_size
    criterion = My_loss_focus2().cuda()
    loss = {'final': 0}
    for stage in range(args.paf_stage):
        key = 'stage_{}'.format(stage)
        loss[key] = criterion(saved_for_loss[stage] * paf_mask,
                              target_paf * paf_mask, batch_size)
        loss['final'] = loss['final'] + loss[key]
    for stage in range(args.paf_stage, 6):
        key = 'stage_{}'.format(stage)
        loss[key] = criterion(saved_for_loss[stage] * heat_mask,
                              target_heat * heat_mask, batch_size)
        loss['final'] = loss['final'] + loss[key]
    return loss
| 39.28169
| 175
| 0.610344
| 1,603
| 11,156
| 3.93325
| 0.068621
| 0.079937
| 0.08184
| 0.078668
| 0.860904
| 0.820936
| 0.774148
| 0.747185
| 0.732435
| 0.718319
| 0
| 0.0162
| 0.203209
| 11,156
| 284
| 176
| 39.28169
| 0.693104
| 0.168878
| 0
| 0.676471
| 0
| 0
| 0.073439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111765
| false
| 0
| 0.011765
| 0.029412
| 0.229412
| 0.005882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
63f66fd08433472237cfdd758577ddd5133e675f
| 4,475
|
py
|
Python
|
tests/test_history.py
|
jcaw/traad
|
770924d9df89037c9a21f1946096aec35685f73d
|
[
"MIT"
] | 74
|
2015-01-10T20:02:41.000Z
|
2021-09-29T15:05:42.000Z
|
tests/test_history.py
|
jcaw/traad
|
770924d9df89037c9a21f1946096aec35685f73d
|
[
"MIT"
] | 37
|
2015-01-06T08:56:43.000Z
|
2022-02-18T06:51:32.000Z
|
tests/test_history.py
|
jcaw/traad
|
770924d9df89037c9a21f1946096aec35685f73d
|
[
"MIT"
] | 16
|
2015-08-02T13:14:58.000Z
|
2022-02-17T00:14:08.000Z
|
import common
import paths
import pytest
@pytest.fixture
def workspace(activate_package, make_workspace):
    """Yield a workspace over a fresh copy of the 'basic' package in 'main'."""
    activate_package(package='basic', into='main')
    yield make_workspace('main')
def test_undo_undoes_changes(workspace):
    """undo() must restore the workspace to its pre-refactoring state."""
    def compare_pristine():
        common.compare_workspaces(paths.packages('basic'),
                                  paths.active('main', 'basic'))
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    # The rename must have diverged the active copy from the package.
    with pytest.raises(ValueError):
        compare_pristine()
    workspace.undo()
    # After undo the two trees compare clean again.
    compare_pristine()
def test_undo_exceptions(workspace):
    """undo raises IndexError on an empty history or an out-of-range index."""
    with pytest.raises(IndexError):
        workspace.undo()
    change = workspace.rename('basic/foo.py', 8, 'Llama')
    workspace.perform(change)
    workspace.undo()
    # History is empty again, so index 1 is out of range.
    with pytest.raises(IndexError):
        workspace.undo(1)
def test_undo_adds_history(workspace):
    """Performing a refactoring pushes exactly one entry onto the undo list."""
    assert len(workspace.root_project.history.undo_list) == 0
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    assert len(workspace.root_project.history.undo_list) == 1
def test_redo_redoes_changes(workspace):
    """redo() re-applies a change that undo() reverted."""
    def compare_pristine():
        common.compare_workspaces(paths.packages('basic'),
                                  paths.active('main', 'basic'))
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    with pytest.raises(ValueError):
        compare_pristine()  # rename diverged the tree
    workspace.undo()
    compare_pristine()      # undo restored it
    workspace.redo()
    with pytest.raises(ValueError):
        compare_pristine()  # redo diverged it again
def test_redo_adds_history(workspace):
    """undo/redo move the change between the redo and undo lists."""
    def list_lengths():
        history = workspace.root_project.history
        return len(history.redo_list), len(history.undo_list)
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    assert list_lengths() == (0, 1)
    workspace.undo()
    assert list_lengths() == (1, 0)
    workspace.redo()
    assert list_lengths() == (0, 1)
def test_redo_exceptions(workspace):
    """redo raises IndexError on an empty redo list or out-of-range index."""
    with pytest.raises(IndexError):
        workspace.redo()
    change = workspace.rename('basic/foo.py', 8, 'Llama')
    workspace.perform(change)
    workspace.undo()
    workspace.redo()
    # The redo list is empty again, so index 1 is out of range.
    with pytest.raises(IndexError):
        workspace.redo(1)
def test_undo_history(workspace):
    """undo_history() grows from zero to one after a single refactoring."""
    assert len(workspace.undo_history()) == 0
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    assert len(workspace.undo_history()) == 1
def test_undo_info(workspace):
    """undo_info(0) exposes the standard change-metadata keys."""
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    info = workspace.undo_info(0)
    assert all(key in info
               for key in ('description', 'time', 'full_change', 'changes'))
def test_undo_info_exceptions(workspace):
    """undo_info raises IndexError for indices beyond the undo list."""
    with pytest.raises(IndexError):
        workspace.undo_info(0)
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    workspace.undo_info(0)  # index 0 is valid now
    with pytest.raises(IndexError):
        workspace.undo_info(1)
def test_redo_history(workspace):
    """Undoing a change moves it onto the redo history."""
    assert len(workspace.redo_history()) == 0
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    workspace.undo()
    assert len(workspace.redo_history()) == 1
def test_redo_info(workspace):
    """redo_info(0) exposes the standard change-metadata keys after an undo."""
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    workspace.undo()
    info = workspace.redo_info(0)
    assert all(key in info
               for key in ('description', 'time', 'full_change', 'changes'))
def test_redo_info_exceptions(workspace):
    """redo_info raises IndexError while the redo list is empty."""
    with pytest.raises(IndexError):
        workspace.redo_info(0)
    workspace.perform(workspace.rename('basic/foo.py', 8, 'Llama'))
    workspace.undo()
    workspace.redo_info(0)  # valid once the change sits on the redo list
| 22.60101
| 63
| 0.585251
| 472
| 4,475
| 5.404661
| 0.112288
| 0.081537
| 0.117601
| 0.145825
| 0.874559
| 0.816151
| 0.785574
| 0.768718
| 0.657389
| 0.657389
| 0
| 0.010483
| 0.296536
| 4,475
| 197
| 64
| 22.715736
| 0.799873
| 0
| 0
| 0.781022
| 0
| 0
| 0.078883
| 0
| 0
| 0
| 0
| 0
| 0.10219
| 1
| 0.094891
| false
| 0
| 0.021898
| 0
| 0.116788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
12269e563d225dd844e98da3c2a56c49550c9831
| 51
|
py
|
Python
|
minerva/engines/web_server.py
|
vicotrbb/minerva
|
628e6c4fda115d2f26d0d3789ae8483053c3960d
|
[
"MIT"
] | null | null | null |
minerva/engines/web_server.py
|
vicotrbb/minerva
|
628e6c4fda115d2f26d0d3789ae8483053c3960d
|
[
"MIT"
] | null | null | null |
minerva/engines/web_server.py
|
vicotrbb/minerva
|
628e6c4fda115d2f26d0d3789ae8483053c3960d
|
[
"MIT"
] | null | null | null |
class WebServer:
    """Placeholder web-server engine (no behavior implemented yet)."""

    def __init__(self):
        # Fix: the original signature omitted ``self``, so every
        # ``WebServer()`` call raised a TypeError.
        pass
| 10.2
| 19
| 0.568627
| 5
| 51
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 51
| 4
| 20
| 12.75
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
125596d4220d7ab7e24376aff3426f5993f95afb
| 42
|
py
|
Python
|
azure-quantum/azure/quantum/aio/job/__init__.py
|
Anatoliy-Litvinenko/qdk-python
|
74b2638a404717424090023ef49afb3045ea920e
|
[
"MIT"
] | 53
|
2021-01-21T23:38:09.000Z
|
2022-03-29T16:34:42.000Z
|
azure-quantum/azure/quantum/aio/job/__init__.py
|
Anatoliy-Litvinenko/qdk-python
|
74b2638a404717424090023ef49afb3045ea920e
|
[
"MIT"
] | 152
|
2021-01-23T07:01:49.000Z
|
2022-03-31T19:43:21.000Z
|
azure-quantum/azure/quantum/aio/job/__init__.py
|
slowy07/qdk-python
|
e4ce0c433cc986bc1c746e9a58f3f05733c657e2
|
[
"MIT"
] | 47
|
2021-01-30T20:15:46.000Z
|
2022-03-25T23:35:28.000Z
|
from azure.quantum.aio.job.job import Job
| 21
| 41
| 0.809524
| 8
| 42
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c3eb520f184d01e8efa3848267051abb5ee9217e
| 43
|
py
|
Python
|
__main__.py
|
jupiterbjy/OpenPortScanner
|
902a076e0c8538615af9050e2551392a75e89a50
|
[
"MIT"
] | 1
|
2021-02-12T16:11:26.000Z
|
2021-02-12T16:11:26.000Z
|
__main__.py
|
jupiterbjy/OpenPortScanner
|
902a076e0c8538615af9050e2551392a75e89a50
|
[
"MIT"
] | null | null | null |
__main__.py
|
jupiterbjy/OpenPortScanner
|
902a076e0c8538615af9050e2551392a75e89a50
|
[
"MIT"
] | null | null | null |
# Will default to Trio
import SharedData
| 8.6
| 22
| 0.767442
| 6
| 43
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209302
| 43
| 4
| 23
| 10.75
| 0.970588
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c3fed29f9b332a9552eb10967c62615b07b25172
| 73
|
py
|
Python
|
src/stk/calculators/optimization/__init__.py
|
fiszczyp/stk
|
56e75c493a472d98ccbf3af14cc9ce7f12cbe3d7
|
[
"MIT"
] | null | null | null |
src/stk/calculators/optimization/__init__.py
|
fiszczyp/stk
|
56e75c493a472d98ccbf3af14cc9ce7f12cbe3d7
|
[
"MIT"
] | null | null | null |
src/stk/calculators/optimization/__init__.py
|
fiszczyp/stk
|
56e75c493a472d98ccbf3af14cc9ce7f12cbe3d7
|
[
"MIT"
] | null | null | null |
from .optimizers import *
from .macromodel import *
from .mopac import *
| 18.25
| 25
| 0.753425
| 9
| 73
| 6.111111
| 0.555556
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 26
| 24.333333
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6139dfdcadb2dea7e819c8edc10dd3ffc01e4fed
| 91
|
py
|
Python
|
automd/__init__.py
|
cliftbar/auto-api
|
bcba991da69551fe964fb5b52ab034fc0c1785c3
|
[
"MIT"
] | null | null | null |
automd/__init__.py
|
cliftbar/auto-api
|
bcba991da69551fe964fb5b52ab034fc0c1785c3
|
[
"MIT"
] | 25
|
2020-05-03T01:45:55.000Z
|
2020-12-17T07:14:56.000Z
|
automd/__init__.py
|
cliftbar/auto-api
|
bcba991da69551fe964fb5b52ab034fc0c1785c3
|
[
"MIT"
] | null | null | null |
from automd.decorators import override_webargs_flaskparser

# Apply the override at import time so it is active for every consumer of
# this package.  NOTE(review): this is an import-time side effect --
# importing ``automd`` mutates webargs' flaskparser behavior globally.
override_webargs_flaskparser()
| 22.75
| 58
| 0.901099
| 10
| 91
| 7.8
| 0.7
| 0.384615
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065934
| 91
| 3
| 59
| 30.333333
| 0.917647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
618e1a10038c5ea1fb764f8f744af4c8f26186fb
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/crashtest/solution_providers/solution_provider_repository.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/crashtest/solution_providers/solution_provider_repository.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/crashtest/solution_providers/solution_provider_repository.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/bd/4a/27/2fca0aac2f3217d73f128af063cd6541040a12b4079302c3e945c3c277
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4375
| 0
| 96
| 1
| 96
| 96
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61a95917e5673acf62f3a933766a3ab403e8995e
| 321
|
py
|
Python
|
build/lib/tnetwork/DCD/analytics/__init__.py
|
Yquetzal/tnetwork
|
43fb2f19aeed57a8a9d9af032ee80f1c9f58516d
|
[
"BSD-2-Clause"
] | 4
|
2019-02-19T07:49:06.000Z
|
2020-09-01T16:17:54.000Z
|
tnetwork/DCD/analytics/__init__.py
|
Yquetzal/tnetwork
|
43fb2f19aeed57a8a9d9af032ee80f1c9f58516d
|
[
"BSD-2-Clause"
] | 1
|
2019-07-13T16:16:28.000Z
|
2019-07-15T09:34:33.000Z
|
build/lib/tnetwork/DCD/analytics/__init__.py
|
Yquetzal/tnetwork
|
43fb2f19aeed57a8a9d9af032ee80f1c9f58516d
|
[
"BSD-2-Clause"
] | 3
|
2019-07-13T16:09:20.000Z
|
2022-02-08T02:23:46.000Z
|
#from tnetwork.DCD.analytics.dynamic_community import *
from tnetwork.DCD.analytics.dynamic_partition import *
#analytics_all = ["longitudinal_similarity", "consecutive_sn_similarity", "similarity_at_each_step", "quality_at_each_step", "nb_node_change", "entropy_by_node","SM_N","SM_L","SM_P"]
#__all__ = analytics_all
| 45.857143
| 182
| 0.806854
| 44
| 321
| 5.340909
| 0.590909
| 0.102128
| 0.12766
| 0.204255
| 0.26383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065421
| 321
| 7
| 183
| 45.857143
| 0.783333
| 0.803738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
61acc419e8870356b0407a3ac85c6ad5c9782879
| 189
|
py
|
Python
|
bip_utils/bip/conf/bip49/__init__.py
|
MIPPLTeam/bip_utils
|
c66446e7ac3879d2cf6308c5b8eb7f7705292660
|
[
"MIT"
] | 149
|
2020-05-15T08:11:43.000Z
|
2022-03-29T16:34:42.000Z
|
bip_utils/bip/conf/bip49/__init__.py
|
MIPPLTeam/bip_utils
|
c66446e7ac3879d2cf6308c5b8eb7f7705292660
|
[
"MIT"
] | 41
|
2020-04-03T15:57:56.000Z
|
2022-03-31T08:25:11.000Z
|
bip_utils/bip/conf/bip49/__init__.py
|
MIPPLTeam/bip_utils
|
c66446e7ac3879d2cf6308c5b8eb7f7705292660
|
[
"MIT"
] | 55
|
2020-04-03T17:05:15.000Z
|
2022-03-24T12:43:42.000Z
|
from bip_utils.bip.conf.bip49.bip49_coins import Bip49Coins
from bip_utils.bip.conf.bip49.bip49_conf import Bip49Conf
from bip_utils.bip.conf.bip49.bip49_conf_getter import Bip49ConfGetter
| 47.25
| 70
| 0.873016
| 31
| 189
| 5.096774
| 0.354839
| 0.132911
| 0.227848
| 0.28481
| 0.601266
| 0.601266
| 0.601266
| 0.417722
| 0
| 0
| 0
| 0.101695
| 0.063492
| 189
| 3
| 71
| 63
| 0.79096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
61c2f3bdb110860655d587c684cf7f02848a2e8d
| 82
|
py
|
Python
|
jetway/memberships/messages.py
|
grow/jetway
|
e32a6f447922c364d876e694f0303ae75523c7ed
|
[
"MIT"
] | null | null | null |
jetway/memberships/messages.py
|
grow/jetway
|
e32a6f447922c364d876e694f0303ae75523c7ed
|
[
"MIT"
] | 6
|
2015-04-10T00:52:05.000Z
|
2015-04-10T03:11:22.000Z
|
jetway/memberships/messages.py
|
grow/jetway
|
e32a6f447922c364d876e694f0303ae75523c7ed
|
[
"MIT"
] | null | null | null |
from protorpc import messages
class MembershipMessage(messages.Message):
pass
| 13.666667
| 42
| 0.817073
| 9
| 82
| 7.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134146
| 82
| 5
| 43
| 16.4
| 0.943662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
f622e3f3f454b1ff32310ac3d3927a7253377603
| 130
|
py
|
Python
|
kedro_mlflow/io/models/__init__.py
|
felipeeeantunes/kedro-mlflow
|
6d7023d7b859e4645053db39b2296a7d1ab67073
|
[
"Apache-2.0"
] | 97
|
2020-04-18T14:24:57.000Z
|
2022-03-19T17:42:43.000Z
|
kedro_mlflow/io/models/__init__.py
|
felipeeeantunes/kedro-mlflow
|
6d7023d7b859e4645053db39b2296a7d1ab67073
|
[
"Apache-2.0"
] | 235
|
2020-04-25T08:15:42.000Z
|
2022-03-31T22:07:36.000Z
|
kedro_mlflow/io/models/__init__.py
|
felipeeeantunes/kedro-mlflow
|
6d7023d7b859e4645053db39b2296a7d1ab67073
|
[
"Apache-2.0"
] | 14
|
2020-04-22T14:46:36.000Z
|
2022-03-10T07:14:45.000Z
|
from .mlflow_model_logger_dataset import MlflowModelLoggerDataSet
from .mlflow_model_saver_dataset import MlflowModelSaverDataSet
| 43.333333
| 65
| 0.923077
| 14
| 130
| 8.142857
| 0.642857
| 0.175439
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 130
| 2
| 66
| 65
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f62a1fd299013647c5482569ef25d6dc0ecb21fd
| 51
|
py
|
Python
|
test/pkgman_triggers_test.py
|
KOLANICH/pkgman_triggers.py
|
7a121da345e55603c3024b8facfc7dad1e8817db
|
[
"Unlicense"
] | null | null | null |
test/pkgman_triggers_test.py
|
KOLANICH/pkgman_triggers.py
|
7a121da345e55603c3024b8facfc7dad1e8817db
|
[
"Unlicense"
] | null | null | null |
test/pkgman_triggers_test.py
|
KOLANICH/pkgman_triggers.py
|
7a121da345e55603c3024b8facfc7dad1e8817db
|
[
"Unlicense"
] | null | null | null |
def test_trigger():
    """Smoke-test hook: prove the trigger fires by emitting a marker line."""
    marker = "test trigger called"
    print(marker)
| 12.75
| 29
| 0.72549
| 7
| 51
| 5.142857
| 0.714286
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 3
| 30
| 17
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.38
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
f633987ea2d5e16fde0aed4afae267becd24a755
| 136
|
py
|
Python
|
Chapter04/05_relational_fields/my_library/__init__.py
|
Sople/Odoo-14-Cookbook-Code
|
2813676a3f28c942ecb823fdfddd423ea9ca4f97
|
[
"MIT"
] | null | null | null |
Chapter04/05_relational_fields/my_library/__init__.py
|
Sople/Odoo-14-Cookbook-Code
|
2813676a3f28c942ecb823fdfddd423ea9ca4f97
|
[
"MIT"
] | null | null | null |
Chapter04/05_relational_fields/my_library/__init__.py
|
Sople/Odoo-14-Cookbook-Code
|
2813676a3f28c942ecb823fdfddd423ea9ca4f97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Relational fields come in three kinds:
# 1. many-to-one: Many2one
# 2. one-to-many: One2many
# 3. many-to-many: Many2many
from . import models
from . import controllers
| 19.428571
| 25
| 0.683824
| 20
| 136
| 4.65
| 0.9
| 0.215054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060345
| 0.147059
| 136
| 7
| 25
| 19.428571
| 0.741379
| 0.580882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c8ba27b0e69c79f56c487b279124ac3154f4051
| 213
|
py
|
Python
|
bclearer_boson_1_1_source/b_code/configurations/resource_constants/resources_namespace_constants.py
|
boro-alpha/bclearer_boson_1_1
|
15207d240fd3144b155922dc5c5d14822023026a
|
[
"MIT"
] | 1
|
2021-07-20T15:48:58.000Z
|
2021-07-20T15:48:58.000Z
|
bclearer_boson_1_1_source/b_code/configurations/resource_constants/resources_namespace_constants.py
|
boro-alpha/bclearer_boson_1_1
|
15207d240fd3144b155922dc5c5d14822023026a
|
[
"MIT"
] | null | null | null |
bclearer_boson_1_1_source/b_code/configurations/resource_constants/resources_namespace_constants.py
|
boro-alpha/bclearer_boson_1_1
|
15207d240fd3144b155922dc5c5d14822023026a
|
[
"MIT"
] | null | null | null |
CONTENT_OPERATIONS_RESOURCES_NAMESPACE = \
'bclearer_boson_1_1_source.resources.content_universes'
ADJUSTMENT_OPERATIONS_RESOURCES_NAMESPACE = \
'bclearer_boson_1_1_source.resources.adjustment_universes'
| 35.5
| 62
| 0.86385
| 24
| 213
| 7
| 0.416667
| 0.22619
| 0.333333
| 0.428571
| 0.690476
| 0.690476
| 0.690476
| 0.690476
| 0.690476
| 0
| 0
| 0.020408
| 0.079812
| 213
| 5
| 63
| 42.6
| 0.836735
| 0
| 0
| 0
| 0
| 0
| 0.511737
| 0.511737
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
143707ee29e32d9132daaf92d58a12d1dc0e9560
| 24
|
py
|
Python
|
ext_resources/__init__.py
|
Darshan-20310597/RumiGANs
|
a70d18b7f6f03570f9dae6d3b88f746eb71136d9
|
[
"MIT"
] | 26
|
2020-10-31T06:00:22.000Z
|
2022-02-13T19:30:49.000Z
|
ext_resources/__init__.py
|
Darshan-20310597/RumiGANs
|
a70d18b7f6f03570f9dae6d3b88f746eb71136d9
|
[
"MIT"
] | 3
|
2021-03-01T05:43:03.000Z
|
2021-07-10T13:08:18.000Z
|
ext_resources/__init__.py
|
DarthSid95/RumiGANs
|
9f7876e89caa0d39bd563947ab9c41f4e3745021
|
[
"MIT"
] | 5
|
2021-04-12T10:59:20.000Z
|
2021-06-04T08:52:51.000Z
|
from .prd_score import *
| 24
| 24
| 0.791667
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
149ce8fb5fea4e4dd2be51a9c5c12ce1f45d7b76
| 72,680
|
py
|
Python
|
fastCloningPrimer.py
|
tommyfuu/BioFoundry
|
a641d69cca04a2622636d7b1affea1c138bf1a5d
|
[
"MIT"
] | 2
|
2020-11-08T09:46:41.000Z
|
2020-11-08T09:48:29.000Z
|
fastCloningPrimer.py
|
tommyfuu/BioFoundry
|
a641d69cca04a2622636d7b1affea1c138bf1a5d
|
[
"MIT"
] | 1
|
2021-04-18T23:46:16.000Z
|
2021-04-18T23:46:16.000Z
|
fastCloningPrimer.py
|
tommyfuu/BioFoundry
|
a641d69cca04a2622636d7b1affea1c138bf1a5d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author : Tom Fu
Date : 2020 Nov 7
FileName : fastCloningPrimer.py (for the BioFoundry Project at the HMC BioMakerspace)
Description : Find primer pairs for fast cloning
"""
import primer3
from Bio import SeqIO
import pandas as pd
import sys
import copy
import math
from Bio.Seq import Seq
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import time
#################
### TESTCASES ###
#################
# TODO: when I run:
# ==============================================================================
# fastCloningPrimers(royaTestPlasmidSeq, royaTestInsertPlasmidSeq,
# royaTestVectorSeq, royaTestInsertSeq, maxTempDiff=MAX_TEMP_DIFF,
# destinationAddress='fastCloningPrimerInfo.csv',
# benchlingAddress='benchlingfastCloningPrimerInfo.csv',
# benchling=True, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE)
# ==============================================================================
# benchlingfastCloningPrimerInfo.csv is empty. However, when I run the provided
# test cases, the correct primers are outputted.
# from
# https://benchling.com/roygoli/f/lib_mvm3FehI-biofoundry/seq_s57xycXu-copy-of-biofoundry-copy-of-pdms123/edit
royaTestPlasmidSeq = "GCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTTTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGGGTAATGACTCTCTAGCTTGAGGCATCAAATAAAACGAAAGGCTCAGTCGAAAGACTGGGCCTTTCGTTTTATCTGTTGTTTGTCGGTGAACGCTCTCCTGAGTAGGACAAATCCGCCCTCTAGCAGCCCGGGCTGCggccgcTATTTCTCCTTTCGCGCAGTACGTGGTTCGCGGCTTAATCCTGCTGGCAGCGGTGATCTTCGACCGTTACAAGCAAAAAGCGAAACGCACTGTCTGATGCTTTTTTCTGCAACAATTTAGCGTTTTTTCCCACCATAGCCAACCGCCATAACGGTTGGCTGTTCTTCGTTGCAAATGGCGACCCCCGTCACACTGTCTATACTTACATGTCTGTAAAGCGCGTTCTGCGCAACACAATAAGAAAAGAGAAGGAGGAGAACCGGgtgACAGAACCGTTAACCGAAACCCCTGAACTATCCGCGAAATATGCCTGGTTTTTTGATCTTGATGGAACGCTGGCGGAAATCAAACCGCATCCCGATCAGGTCGTCGTGCCTGACAATATTCTGCAAGGACTACAGCTACTGGCAACCGCAAGTGATGGTGCATTGGCATTGATATCAGGGCGCTCAATGGTGGAGCTTGACGCACTGGCAAAACCTTATCGCTTCCCGTtCTAGATTTAAGAAGGAGATATACATATGAGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTGATGTTAATGGGCACAAATTTTCTGTCAGTGGAGAGGGTGAAGGTGATGCTACATACGGAAAGCTTACCCTTAAATTTATTTGCACTACTGGAAAACTACCTGTTCCATGGCCAACACTTGTCACTACTTTGACCTATGGTGTTCAATGCTTTTCCCGTTATCCGGATCATATGAAACGGCATGACTTTTTCAAGAGTGCCATGCCCGAAGGTTATGTACAGGAACGCACTATATCTTTCAAAGATGACGGGAACTACAAGACGCGTGCTGAAGTCAAGTTTGAAGGTGATACCCTTGTTAATCGTATCGAGTTAAAAGGTATTGATTTTAAAGAAGATGGAAACATTCTCGGACACAAACTCGAGTACAACTATAACTCACACAATGTATACATCACGGCAGACAAACAAAAGAATGGAATCAAAGCTAACTTCAAAATTCGCCACAACATTGAAGATGGATCCGTTCAACTAGCAGACCATTATCAACAAAATACTCCAATTGGCGATGGCCCTGTCCTTTTACCAGACAACCATTACCTGTCGACACAATCTGCCCTTTCGAAAGATCCCAACGAAAAGCGTGACCACATGGTCCTTCTTGAGTTTGTAACTGCTGCTGGGATTACACATGGCATGGATGAGCTCTACAAATAATGAATTCCAGCTGAGCGCCGGTCGCTACCATTACCAGTTGGTCTGGTGTCAAAAATAATAATAACCGGGCAGGCCATGTCTGCCCGTATTTCGCGTAAGGAAATCCATTATGTACTATTTAATTCTTGAAGACGAAAGGGCCTCGTGATACGCCTATTTTTATAGGTTAATGTCATGATAATAATGGTTTCTTAGACGTCAGGTGGCGATATCGGGCTAGCCGGCCCGACGCACTTTGCGCCGAATAAATACCTGTGACGGAAGATCACTTCGCAGAATAAATAAATCCTGGTGTCCCTGTTGATACCGGGAAGCCCTGGGCCAACTTTTGGCGAAAATGAGACGTTGATCGGCACGTAAGAGGTTCCAACTTTCACCATAATGAAATAAGATCACTACCGGGCGTATTTTTTGAGTTATCGAGATTTTCAGGAGCTAAGGAAGCTA
AAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTCCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTCTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGTGCTACGCCTGAATAAGTGATAATAAGCGGATGAATGGCAGAAATGACGGATATCGTCCATTCCGACAGCATCGCCAGTCACTATGGCGTGCTGCTAGCGCTTTTAGCCGCTTTAGCGGCCTTTCCCCCTACCCGAAGGGTGGGGGCGCGTGTGCAGCCCCGCAGGGCCTGTCTCGGTCGATCATTCAGCCCGGCTCATCCTTCTGGCGTGGCGGCAGACCGAACAAGGCGCGGTCGTGGTCGCGTTCAAGGTACGCATCCATTGCCGCCATGAGCCGATCCTCCGGCCACTCGCTGCTGTTCACCTTGGCCAAAATCATGGCCCCCACCAGCACCTTGCGCCTTGTTTCGTTCTTGCGCTCTTGCTGCTGTTCCCTTGCCCGCACCCGCTGAATTTCGGCATTGATTCGCGCTCGTTGTTCTTCGAGCTTGGCCAGCCGATCCGCCGCCTTGTTGCTCCCCTTAACCATCTTGACACCCCATTGTTAATGTGCTGTCTCGTAGGCTATCATGGAGGCACAGCGGCGGCAATCCCGACCCTACTTTGTAGGGGAGGGCGCACTTACCGGTTTCTCTTCGAGAAACTGGCCTAACGGCCACCCTTCGGGCGGTGCGCTCTCCGAGGGCCATTGCATGGAGCCGAAAAGCAAAAGCAACAGCGAGGCAGCATGGCGATTTATCACCTTACGGCGAAAACCGGCAGCAGGTCGGGCGGCCAATCGGCCAGGGCCAAGGCCGACTACATCCAGCGCGAAGGCAAGTATGCCCGCGACATGGATGAAGTCTTGCACGCCGAATCCGGGCACATGCCGGAGTTCGTCGAGCGGCCCGCCGACTACTGGGATGCTGCCGACCTGTATGAACGCGCCAATGGGCGGCTGTTCAAGGAGGTCGAATTTGCCCTGCCGGTCGAGCTGACCCTCGACCAGCAGAAGGCGCTGGCGTCCGAGTTCGCCCAGCACCTGACCGGTGCCGAGCGCCTGCCGTATACGCTGGCCATCCATGCCGGTGGCGGCGAGAACCCGCACTGCCACCTGATGATCTCCGAGCGGATCAATGACGGCATCGAGCGGCCCGCCGCTCAGTGGTTCAAGCGGTACAACGGCAAGACCCCGGAGAAGGGCGGGGCACAGAAGACCGAAGCGCTCAAGCCCAAGGCATGGCTTGAGCAGACCCGCGAGGCATGGGCCGACCATGCCAACCGGGCATTAGAGCGGGCTGGCCACGACGCCCGCATTGACCACAGAACACTTGAGGCGCAGGGCATCGAGCGCCTGCCCGGTGTTCACCTGGGGCCGAA
CGTGGTGGAGATGGAAGGCCGGGGCATCCGCACCGACCGGGCAGACGTGGCCCTGAACATCGACACCGCCAACGCCCAGATCATCGACTTACAGGAATACCGGGAGGCAATAGACCATGAACGCAATCGACAGAGTGAAGAAATCCAGAGGCATCAACGAGTTAGCGGAGCAGATCGAACCGCTGGCCCAGAGCATGGCGACACTGGCCGACGAAGCCCGGCAGGTCATGAGCCAGACCCAGCAGGCCAGCGAGGCGCAGGCGGCGGAGTGGCTGAAAGCCCAGCGCCAGACAGGGGCGGCATGGGTGGAGCTGGCCAAAGAGTTGCGGGAGGTAGCCGCCGAGGTGAGCAGCGCCGCGCAGAGCGCCCGGAGCGCGTCGCGGGGGTGGCACTGGAAGCTATGGCTAACCGTGATGCTGGCTTCCATGATGCCTACGGTGGTGCTGCTGATCGCATCGTTGCTCTTGCTCGACCTGACGCCACTGACAACCGAGGACGGCTCGATCTGGCTGCGCTTGGTGGCCCGATGAAGAACGACAGGACTTTGCAGGCCATAGGCCGACAGCTCAAGGCCATGGGCTGTGAGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCCCCTGACGAGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAAAAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTGCAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAG
TGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAACACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATCATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTTCAAGAATTCGAGCTCGGTACCGGATCCGTCGACCTGCAGCCAAGCTTAATTAGCTGAGCTTGGACTCCTGTTGATAGATCCAGTAATGACCTCAGAACTCCATCTGGATTTGTTCAGAACGCTCGGTTGCCGCCGGGCGTTTTTTATTGGTGAGAATCCAAGCTAGCTTGGCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTTCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGT"
# from
# https://benchling.com/roygoli/f/lib_mvm3FehI-biofoundry/seq_cttcEI6n-copy-of-biofoundry-copy-of-e-coli-iram-annotated/edit
royaTestInsertPlasmidSeq = "GCTAAAGTTGGATACTTAAGAAATGCTTCATAATTCAGTAAGGCATTAGCATAATGGAAATAAAAGTGCAGAGACTATCTCTATGGATGATTAATACTGTCTTTTTATTGTCACCCATAAATAATCACCAGACTAATACTATCAACTTGATATTTGAAATGTGATCACTTGACTTTTGATACGTTATTTTATAACGGTTAACATATTTATAAAAACAACGGCCGTGCCACACGTCCGTTTCAATACTTAACGCACATGTATTTTGGTTTAGTCATCATCCGGTTATATGTATTTTAGCCAGGAACAGGTTAAATCATTCCTATATAACTCAAAAATTGAAACCTTATTCTCATGTCATGCTTATATTCATTATTATCGTTATATAAAAAGGCAACCATAATGTTTAGCAAATTGGCACAAAGTAGCATAAAGGCTATGTTTTAATTACAGGATGTTCAGTCATTTGAATGTATAACATTATAGCTAAACAAATCTAAAACGAAGTCAATAATTTATTGCTTTCACAAAATCTCATTTTGTTTAACATCCATTGAGATTCCTTGCTTTAAATTTTATTTTATATAAGCCATCATTTTAATTAATTTATTTTTTTGAGGGGGGGGTAATATACTCATATGCAAAATCAAGAAATAAACATCCTAATGAACCATATTAAATACCGTGGGATAAGACATAACAAatgAAGTGGATAGTAATTGACACGGTAATTCAACCTACATGTGGTATATCTTTTTCAGCCATATGGGGTAATATGAAAATGATCATCTGGTATCAATCTACTATATTTCTCCCTCCTGGCAGTATATTTACACCGGTTAAGTCTGGTATTATCCTTAAGGATAAAGAATATCCTATTACTATTTATCACATCGCACCATTCAACAAGGATTTATGGAGTTTACTCAAAAGCAGTCAAGAGTGTCCTCCAGGAGAAAGCAAAATAACAAATAAATGTTTACATAATAGTTGCATTATAAAAATATGCCCATATGGGCTCAAGtaa"
# from
# https://benchling.com/roygoli/f/lib_mvm3FehI-biofoundry/seq_s57xycXu-copy-of-biofoundry-copy-of-pdms123/edit
royaTestVectorSeq = "CTAGATTTAAGAAGGAGATATACATATGAGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTGATGTTAATGGGCACAAATTTTCTGTCAGTGGAGAGGGTGAAGGTGATGCTACATACGGAAAGCTTACCCTTAAATTTATTTGCACTACTGGAAAACTACCTGTTCCATGGCCAACACTTGTCACTACTTTGACCTATGGTGTTCAATGCTTTTCCCGTTATCCGGATCATATGAAACGGCATGACTTTTTCAAGAGTGCCATGCCCGAAGGTTATGTACAGGAACGCACTATATCTTTCAAAGATGACGGGAACTACAAGACGCGTGCTGAAGTCAAGTTTGAAGGTGATACCCTTGTTAATCGTATCGAGTTAAAAGGTATTGATTTTAAAGAAGATGGAAACATTCTCGGACACAAACTCGAGTACAACTATAACTCACACAATGTATACATCACGGCAGACAAACAAAAGAATGGAATCAAAGCTAACTTCAAAATTCGCCACAACATTGAAGATGGATCCGTTCAACTAGCAGACCATTATCAACAAAATACTCCAATTGGCGATGGCCCTGTCCTTTTACCAGACAACCATTACCTGTCGACACAATCTGCCCTTTCGAAAGATCCCAACGAAAAGCGTGACCACATGGTCCTTCTTGAGTTTGTAACTGCTGCTGGGATTACACATGGCATGGATGAGCTCTACAAATAATGAATTCCAGCTGAGCGCCGGTCGCTACCATTACCAGTTGGTCTGGTGTCAAAAATAATAATAACCGGGCAGGCCATGTCTGCCCGTATTTCGCGTAAGGAAATCCATTATGTACTATTTAATTCTTGAAGACGAAAGGGCCTCGTGATACGCCTATTTTTATAGGTTAATGTCATGATAATAATGGTTTCTTAGACGTCAGGTGGCGATATCGGGCTAGCCGGCCCGACGCACTTTGCGCCGAATAAATACCTGTGACGGAAGATCACTTCGCAGAATAAATAAATCCTGGTGTCCCTGTTGATACCGGGAAGCCCTGGGCCAACTTTTGGCGAAAATGAGACGTTGATCGGCACGTAAGAGGTTCCAACTTTCACCATAATGAAATAAGATCACTACCGGGCGTATTTTTTGAGTTATCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTCCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTCTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGTGCTACGCCTGAATAAGTGATAATAAGCGGATGAATGGCAGAAATGACGGATATCGTCCATTCCGACAGCATCGCCAGTCACTATGGCGT
GCTGCTAGCGCTTTTAGCCGCTTTAGCGGCCTTTCCCCCTACCCGAAGGGTGGGGGCGCGTGTGCAGCCCCGCAGGGCCTGTCTCGGTCGATCATTCAGCCCGGCTCATCCTTCTGGCGTGGCGGCAGACCGAACAAGGCGCGGTCGTGGTCGCGTTCAAGGTACGCATCCATTGCCGCCATGAGCCGATCCTCCGGCCACTCGCTGCTGTTCACCTTGGCCAAAATCATGGCCCCCACCAGCACCTTGCGCCTTGTTTCGTTCTTGCGCTCTTGCTGCTGTTCCCTTGCCCGCACCCGCTGAATTTCGGCATTGATTCGCGCTCGTTGTTCTTCGAGCTTGGCCAGCCGATCCGCCGCCTTGTTGCTCCCCTTAACCATCTTGACACCCCATTGTTAATGTGCTGTCTCGTAGGCTATCATGGAGGCACAGCGGCGGCAATCCCGACCCTACTTTGTAGGGGAGGGCGCACTTACCGGTTTCTCTTCGAGAAACTGGCCTAACGGCCACCCTTCGGGCGGTGCGCTCTCCGAGGGCCATTGCATGGAGCCGAAAAGCAAAAGCAACAGCGAGGCAGCATGGCGATTTATCACCTTACGGCGAAAACCGGCAGCAGGTCGGGCGGCCAATCGGCCAGGGCCAAGGCCGACTACATCCAGCGCGAAGGCAAGTATGCCCGCGACATGGATGAAGTCTTGCACGCCGAATCCGGGCACATGCCGGAGTTCGTCGAGCGGCCCGCCGACTACTGGGATGCTGCCGACCTGTATGAACGCGCCAATGGGCGGCTGTTCAAGGAGGTCGAATTTGCCCTGCCGGTCGAGCTGACCCTCGACCAGCAGAAGGCGCTGGCGTCCGAGTTCGCCCAGCACCTGACCGGTGCCGAGCGCCTGCCGTATACGCTGGCCATCCATGCCGGTGGCGGCGAGAACCCGCACTGCCACCTGATGATCTCCGAGCGGATCAATGACGGCATCGAGCGGCCCGCCGCTCAGTGGTTCAAGCGGTACAACGGCAAGACCCCGGAGAAGGGCGGGGCACAGAAGACCGAAGCGCTCAAGCCCAAGGCATGGCTTGAGCAGACCCGCGAGGCATGGGCCGACCATGCCAACCGGGCATTAGAGCGGGCTGGCCACGACGCCCGCATTGACCACAGAACACTTGAGGCGCAGGGCATCGAGCGCCTGCCCGGTGTTCACCTGGGGCCGAACGTGGTGGAGATGGAAGGCCGGGGCATCCGCACCGACCGGGCAGACGTGGCCCTGAACATCGACACCGCCAACGCCCAGATCATCGACTTACAGGAATACCGGGAGGCAATAGACCATGAACGCAATCGACAGAGTGAAGAAATCCAGAGGCATCAACGAGTTAGCGGAGCAGATCGAACCGCTGGCCCAGAGCATGGCGACACTGGCCGACGAAGCCCGGCAGGTCATGAGCCAGACCCAGCAGGCCAGCGAGGCGCAGGCGGCGGAGTGGCTGAAAGCCCAGCGCCAGACAGGGGCGGCATGGGTGGAGCTGGCCAAAGAGTTGCGGGAGGTAGCCGCCGAGGTGAGCAGCGCCGCGCAGAGCGCCCGGAGCGCGTCGCGGGGGTGGCACTGGAAGCTATGGCTAACCGTGATGCTGGCTTCCATGATGCCTACGGTGGTGCTGCTGATCGCATCGTTGCTCTTGCTCGACCTGACGCCACTGACAACCGAGGACGGCTCGATCTGGCTGCGCTTGGTGGCCCGATGAAGAACGACAGGACTTTGCAGGCCATAGGCCGACAGCTCAAGGCCATGGGCTGTGAGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCC
CCTGACGAGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAAAAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTGCAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAGTGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAACACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATCATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTTCAAGAATTCGAGCTCGGTACCGGATCCGTCGACCTGCAGCCAAGCTTAATTAGCTGAGCTTGGACTCCTGTTGATAGATCCAGTAATGACCTCAGAACTCCATCTGGATTTGTTCAGAACGCTCGGTTGCCGCCGGGCGTTTTTTATTGGTGAGAATCCAAGCTAGCTTGGCGAGATTTTCAGGAGCTAAGGAAGCT
AAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTTCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTTTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGGGTAATGACTCTCTAGCTTGAGGCATCAAATAAAACGAAAGGCTCAGTCGAAAGACTGGGCCTTTCGTTTTATCTGTTGTTTGTCGGTGAACGCTCTCCTGAGTAGGACAAATCCGCCCTCTAGCAGCCCGGGCTGC"
# from
# https://benchling.com/roygoli/f/lib_mvm3FehI-biofoundry/seq_cttcEI6n-copy-of-biofoundry-copy-of-e-coli-iram-annotated/edit
royaTestInsertSeq = "GCTAAAGTTGGATACTTAAGAAATGCTTCATAATTCAGTAAGGCATTAGCATAATGGAAATAAAAGTGCAGAGACTATCTCTATGGATGATTAATACTGTCTTTTTATTGTCACCCATAAATAATCACCAGACTAATACTATCAACTTGATATTTGAAATGTGATCACTTGACTTTTGATACGTTATTTTATAACGGTTAACATATTTATAAAAACAACGGCCGTGCCACACGTCCGTTTCAATACTTAACGCACATGTATTTTGGTTTAGTCATCATCCGGTTATATGTATTTTAGCCAGGAACAGGTTAAATCATTCCTATATAACTCAAAAATTGAAACCTTATTCTCATGTCATGCTTATATTCATTATTATCGTTATATAAAAAGGCAACCATAATGTTTAGCAAATTGGCACAAAGTAGCATAAAGGCTATGTTTTAATTACAGGATGTTCAGTCATTTGAATGTATAACATTATAGCTAAACAAATCTAAAACGAAGTCAATAATTTATTGCTTTCACAAAATCTCATTTTGTTTAACATCCATTGAGATTCCTTGCTTTAAATTTTATTTTATATAAGCCATCATTTTAATTAATTTATTTTTTTGAGGGGGGGGTAATATACTCATATGCAAAATCAAGAAATAAACATCCTAATGAACCATATTAAATACCGTGGGATAAGACATAACAA"
royaTestInsertSeq1 = 'TCACTTGACTTTTGATACGTTATTTTATAACGGTTAACATATTTATAAAAACAACGGCCGTGCCACACGTCCGTTTCAATACTTAACGCACATGTATTTTGGTTTAGTCATCATCCGGTTATATGTATTTTAGCCAGGAACAGGTTAAATCATTCCTATATAACTCAAAAATTGAAACCTTATTCTCATGTCATGCTTATATTCATTATTATCGTTATATAAAAAGGCAACCATAATGTTTAGCAAATTGGCACAAAGTAGCATAAAGGCTATGTTTTAATTACAGGATGTTCAGTCATTTGAATGTATAACATTATAGCTAAACAAATCTAAAACGAAGTCAATAATTTATTGCTTTCACAAAATCTCATTTTGTTTAACATCCATTGAGATTCCTTGCTTTAAATTTTATTTTATATAAGCCATCATTTTAATTAATTTATTTTTTTGAGGGGGGGGTAATATACTCATATGCAAAATCAAGAAATAAACATCCTAATGAACCATATTAAATACCGTGGGATAAGACATAACAA'
richardTestPrimerForward = "CCCGTTCTAGATTTAAGAAGGAGA"
richardTestPrimerReverse = "GTCATTACCCCAGGCGTTTA"
primer3pySeq = 'GCTTGCATGCCTGCAGGTCGACTCTAGAGGATCCCCCTACATTTTAGCATCAGTGAGTACAGCATGCTTACTGGAAGAGAGGGTCATGCAACAGATTAGGAGGTAAGTTTGCAAAGGCAGGCTAAGGAGGAGACGCACTGAATGCCATGGTAAGAACTCTGGACATAAAAATATTGGAAGTTGTTGAGCAAGTNAAAAAAATGTTTGGAAGTGTTACTTTAGCAATGGCAAGAATGATAGTATGGAATAGATTGGCAGAATGAAGGCAAAATGATTAGACATATTGCATTAAGGTAAAAAATGATAACTGAAGAATTATGTGCCACACTTATTAATAAGAAAGAATATGTGAACCTTGCAGATGTTTCCCTCTAGTAG'
vectorPlasmid1AddressGB = 'biofoundry-copy-of-pdms123.gb'
vectorPlasmid1AddressFA = 'biofoundry-copy-of-pdms123.fasta'
insertPlasmid1AddressGB = 'biofoundry-copy-of-e-coli-iram-annotated.gb'
insertPlasmid1AddressFA = 'biofoundry-copy-of-e-coli-iram-annotated.fasta'
vectorPlasmidSeq1 = 'TTCGAGCTCGGTACCGGATCCGTCGACCTGCAGCCAAGCTTAATTAGCTGAGCTTGGACTCCTGTTGATAGATCCAGTAATGACCTCAGAACTCCATCTGGATTTGTTCAGAACGCTCGGTTGCCGCCGGGCGTTTTTTATTGGTGAGAATCCAAGCTAGCTTGGCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTTCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTTTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGGGTAATGACTCTCTAGCTTGAGGCATCAAATAAAACGAAAGGCTCAGTCGAAAGACTGGGCCTTTCGTTTTATCTGTTGTTTGTCGGTGAACGCTCTCCTGAGTAGGACAAATCCGCCCTCTAGCAGCCCGGGCTGCggccgcTATTTCTCCTTTCGCGCAGTACGTGGTTCGCGGCTTAATCCTGCTGGCAGCGGTGATCTTCGACCGTTACAAGCAAAAAGCGAAACGCACTGTCTGATGCTTTTTTCTGCAACAATTTAGCGTTTTTTCCCACCATAGCCAACCGCCATAACGGTTGGCTGTTCTTCGTTGCAAATGGCGACCCCCGTCACACTGTCTATACTTACATGTCTGTAAAGCGCGTTCTGCGCAACACAATAAGAAAAGAGAAGGAGGAGAACCGGgtgACAGAACCGTTAACCGAAACCCCTGAACTATCCGCGAAATATGCCTGGTTTTTTGATCTTGATGGAACGCTGGCGGAAATCAAACCGCATCCCGATCAGGTCGTCGTGCCTGACAATATTCTGCAAGGACTACAGCTACTGGCAACCGCAAGTGATGGTGCATTGGCATTGATATCAGGGCGCTCAATGGTGGAGCTTGACGCACTGGCAAAACCTTATCGCTTCCCGTtCTAGATTTAAGAAGGAGATATACATATGAGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTGATGTTAATGGGCACAAATTTTCTGTCAGTGGAGAGGGTGAAGGTGATGCTACATACGGAAAGCTTACCCTTAAATTTATTTGCACTACTGGAAAACTACCTGTTCCATGGCCAACACTTGTCACTACTTTGACCTATGGTGTTCAATGCTTTTCCCGTTATCCGGATCATATGAAACGGCATGACTTTTTCAAGAGTGCCATGCCCGAAGGTTATGTACAGGAACGCACTATATCTTTCAAAGATGACGGGAACTACAAGACGCGTGCTGAAGTCAAGTTTGAAGGTGATACCCTTGTTAATCGTATCGAGTTAAAAGGTATTGATTTTAAAGAAGATGGAAACATTCTCGGACACAAACTC
GAGTACAACTATAACTCACACAATGTATACATCACGGCAGACAAACAAAAGAATGGAATCAAAGCTAACTTCAAAATTCGCCACAACATTGAAGATGGATCCGTTCAACTAGCAGACCATTATCAACAAAATACTCCAATTGGCGATGGCCCTGTCCTTTTACCAGACAACCATTACCTGTCGACACAATCTGCCCTTTCGAAAGATCCCAACGAAAAGCGTGACCACATGGTCCTTCTTGAGTTTGTAACTGCTGCTGGGATTACACATGGCATGGATGAGCTCTACAAATAATGAATTCCAGCTGAGCGCCGGTCGCTACCATTACCAGTTGGTCTGGTGTCAAAAATAATAATAACCGGGCAGGCCATGTCTGCCCGTATTTCGCGTAAGGAAATCCATTATGTACTATTTAATTCTTGAAGACGAAAGGGCCTCGTGATACGCCTATTTTTATAGGTTAATGTCATGATAATAATGGTTTCTTAGACGTCAGGTGGCGATATCGGGCTAGCCGGCCCGACGCACTTTGCGCCGAATAAATACCTGTGACGGAAGATCACTTCGCAGAATAAATAAATCCTGGTGTCCCTGTTGATACCGGGAAGCCCTGGGCCAACTTTTGGCGAAAATGAGACGTTGATCGGCACGTAAGAGGTTCCAACTTTCACCATAATGAAATAAGATCACTACCGGGCGTATTTTTTGAGTTATCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTCCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTCTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGTGCTACGCCTGAATAAGTGATAATAAGCGGATGAATGGCAGAAATGACGGATATCGTCCATTCCGACAGCATCGCCAGTCACTATGGCGTGCTGCTAGCGCTTTTAGCCGCTTTAGCGGCCTTTCCCCCTACCCGAAGGGTGGGGGCGCGTGTGCAGCCCCGCAGGGCCTGTCTCGGTCGATCATTCAGCCCGGCTCATCCTTCTGGCGTGGCGGCAGACCGAACAAGGCGCGGTCGTGGTCGCGTTCAAGGTACGCATCCATTGCCGCCATGAGCCGATCCTCCGGCCACTCGCTGCTGTTCACCTTGGCCAAAATCATGGCCCCCACCAGCACCTTGCGCCTTGTTTCGTTCTTGCGCTCTTGCTGCTGTTCCCTTGCCCGCACCCGCTGAATTTCGGCATTGATTCGCGCTCGTTGTTCTTCGAGCTTGGCCAGCCGATCCGCCGCCTTGTTGCTCCCCTTAACCATCTTGACACCCCATTGTTAATGTGCTGTCTCGTAGGCTATCATGGAGGCACAGCGGCGGCAATCCCGACCCTACTTTGTAGGGGAGGGCG
CACTTACCGGTTTCTCTTCGAGAAACTGGCCTAACGGCCACCCTTCGGGCGGTGCGCTCTCCGAGGGCCATTGCATGGAGCCGAAAAGCAAAAGCAACAGCGAGGCAGCATGGCGATTTATCACCTTACGGCGAAAACCGGCAGCAGGTCGGGCGGCCAATCGGCCAGGGCCAAGGCCGACTACATCCAGCGCGAAGGCAAGTATGCCCGCGACATGGATGAAGTCTTGCACGCCGAATCCGGGCACATGCCGGAGTTCGTCGAGCGGCCCGCCGACTACTGGGATGCTGCCGACCTGTATGAACGCGCCAATGGGCGGCTGTTCAAGGAGGTCGAATTTGCCCTGCCGGTCGAGCTGACCCTCGACCAGCAGAAGGCGCTGGCGTCCGAGTTCGCCCAGCACCTGACCGGTGCCGAGCGCCTGCCGTATACGCTGGCCATCCATGCCGGTGGCGGCGAGAACCCGCACTGCCACCTGATGATCTCCGAGCGGATCAATGACGGCATCGAGCGGCCCGCCGCTCAGTGGTTCAAGCGGTACAACGGCAAGACCCCGGAGAAGGGCGGGGCACAGAAGACCGAAGCGCTCAAGCCCAAGGCATGGCTTGAGCAGACCCGCGAGGCATGGGCCGACCATGCCAACCGGGCATTAGAGCGGGCTGGCCACGACGCCCGCATTGACCACAGAACACTTGAGGCGCAGGGCATCGAGCGCCTGCCCGGTGTTCACCTGGGGCCGAACGTGGTGGAGATGGAAGGCCGGGGCATCCGCACCGACCGGGCAGACGTGGCCCTGAACATCGACACCGCCAACGCCCAGATCATCGACTTACAGGAATACCGGGAGGCAATAGACCATGAACGCAATCGACAGAGTGAAGAAATCCAGAGGCATCAACGAGTTAGCGGAGCAGATCGAACCGCTGGCCCAGAGCATGGCGACACTGGCCGACGAAGCCCGGCAGGTCATGAGCCAGACCCAGCAGGCCAGCGAGGCGCAGGCGGCGGAGTGGCTGAAAGCCCAGCGCCAGACAGGGGCGGCATGGGTGGAGCTGGCCAAAGAGTTGCGGGAGGTAGCCGCCGAGGTGAGCAGCGCCGCGCAGAGCGCCCGGAGCGCGTCGCGGGGGTGGCACTGGAAGCTATGGCTAACCGTGATGCTGGCTTCCATGATGCCTACGGTGGTGCTGCTGATCGCATCGTTGCTCTTGCTCGACCTGACGCCACTGACAACCGAGGACGGCTCGATCTGGCTGCGCTTGGTGGCCCGATGAAGAACGACAGGACTTTGCAGGCCATAGGCCGACAGCTCAAGGCCATGGGCTGTGAGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCCCCTGACGAGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAA
AAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTGCAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAGTGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAACACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATCATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTTCAAGAA'
insertPlasmidSeq1 = 'GCTAAAGTTGGATACTTAAGAAATGCTTCATAATTCAGTAAGGCATTAGCATAATGGAAATAAAAGTGCAGAGACTATCTCTATGGATGATTAATACTGTCTTTTTATTGTCACCCATAAATAATCACCAGACTAATACTATCAACTTGATATTTGAAATGTGATCACTTGACTTTTGATACGTTATTTTATAACGGTTAACATATTTATAAAAACAACGGCCGTGCCACACGTCCGTTTCAATACTTAACGCACATGTATTTTGGTTTAGTCATCATCCGGTTATATGTATTTTAGCCAGGAACAGGTTAAATCATTCCTATATAACTCAAAAATTGAAACCTTATTCTCATGTCATGCTTATATTCATTATTATCGTTATATAAAAAGGCAACCATAATGTTTAGCAAATTGGCACAAAGTAGCATAAAGGCTATGTTTTAATTACAGGATGTTCAGTCATTTGAATGTATAACATTATAGCTAAACAAATCTAAAACGAAGTCAATAATTTATTGCTTTCACAAAATCTCATTTTGTTTAACATCCATTGAGATTCCTTGCTTTAAATTTTATTTTATATAAGCCATCATTTTAATTAATTTATTTTTTTGAGGGGGGGGTAATATACTCATATGCAAAATCAAGAAATAAACATCCTAATGAACCATATTAAATACCGTGGGATAAGACATAACAAatgAAGTGGATAGTAATTGACACGGTAATTCAACCTACATGTGGTATATCTTTTTCAGCCATATGGGGTAATATGAAAATGATCATCTGGTATCAATCTACTATATTTCTCCCTCCTGGCAGTATATTTACACCGGTTAAGTCTGGTATTATCCTTAAGGATAAAGAATATCCTATTACTATTTATCACATCGCACCATTCAACAAGGATTTATGGAGTTTACTCAAAAGCAGTCAAGAGTGTCCTCCAGGAGAAAGCAAAATAACAAATAAATGTTTACATAATAGTTGCATTATAAAAATATGCCCATATGGGCTCAAGtaa'
vectorSeq1 = 'CTAGATTTAAGAAGGAGATATACATATGAGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTGATGTTAATGGGCACAAATTTTCTGTCAGTGGAGAGGGTGAAGGTGATGCTACATACGGAAAGCTTACCCTTAAATTTATTTGCACTACTGGAAAACTACCTGTTCCATGGCCAACACTTGTCACTACTTTGACCTATGGTGTTCAATGCTTTTCCCGTTATCCGGATCATATGAAACGGCATGACTTTTTCAAGAGTGCCATGCCCGAAGGTTATGTACAGGAACGCACTATATCTTTCAAAGATGACGGGAACTACAAGACGCGTGCTGAAGTCAAGTTTGAAGGTGATACCCTTGTTAATCGTATCGAGTTAAAAGGTATTGATTTTAAAGAAGATGGAAACATTCTCGGACACAAACTCGAGTACAACTATAACTCACACAATGTATACATCACGGCAGACAAACAAAAGAATGGAATCAAAGCTAACTTCAAAATTCGCCACAACATTGAAGATGGATCCGTTCAACTAGCAGACCATTATCAACAAAATACTCCAATTGGCGATGGCCCTGTCCTTTTACCAGACAACCATTACCTGTCGACACAATCTGCCCTTTCGAAAGATCCCAACGAAAAGCGTGACCACATGGTCCTTCTTGAGTTTGTAACTGCTGCTGGGATTACACATGGCATGGATGAGCTCTACAAATAATGAATTCCAGCTGAGCGCCGGTCGCTACCATTACCAGTTGGTCTGGTGTCAAAAATAATAATAACCGGGCAGGCCATGTCTGCCCGTATTTCGCGTAAGGAAATCCATTATGTACTATTTAATTCTTGAAGACGAAAGGGCCTCGTGATACGCCTATTTTTATAGGTTAATGTCATGATAATAATGGTTTCTTAGACGTCAGGTGGCGATATCGGGCTAGCCGGCCCGACGCACTTTGCGCCGAATAAATACCTGTGACGGAAGATCACTTCGCAGAATAAATAAATCCTGGTGTCCCTGTTGATACCGGGAAGCCCTGGGCCAACTTTTGGCGAAAATGAGACGTTGATCGGCACGTAAGAGGTTCCAACTTTCACCATAATGAAATAAGATCACTACCGGGCGTATTTTTTGAGTTATCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTCCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTCTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGTGCTACGCCTGAATAAGTGATAATAAGCGGATGAATGGCAGAAATGACGGATATCGTCCATTCCGACAGCATCGCCAGTCACTATGGCGTGCTGCTA
GCGCTTTTAGCCGCTTTAGCGGCCTTTCCCCCTACCCGAAGGGTGGGGGCGCGTGTGCAGCCCCGCAGGGCCTGTCTCGGTCGATCATTCAGCCCGGCTCATCCTTCTGGCGTGGCGGCAGACCGAACAAGGCGCGGTCGTGGTCGCGTTCAAGGTACGCATCCATTGCCGCCATGAGCCGATCCTCCGGCCACTCGCTGCTGTTCACCTTGGCCAAAATCATGGCCCCCACCAGCACCTTGCGCCTTGTTTCGTTCTTGCGCTCTTGCTGCTGTTCCCTTGCCCGCACCCGCTGAATTTCGGCATTGATTCGCGCTCGTTGTTCTTCGAGCTTGGCCAGCCGATCCGCCGCCTTGTTGCTCCCCTTAACCATCTTGACACCCCATTGTTAATGTGCTGTCTCGTAGGCTATCATGGAGGCACAGCGGCGGCAATCCCGACCCTACTTTGTAGGGGAGGGCGCACTTACCGGTTTCTCTTCGAGAAACTGGCCTAACGGCCACCCTTCGGGCGGTGCGCTCTCCGAGGGCCATTGCATGGAGCCGAAAAGCAAAAGCAACAGCGAGGCAGCATGGCGATTTATCACCTTACGGCGAAAACCGGCAGCAGGTCGGGCGGCCAATCGGCCAGGGCCAAGGCCGACTACATCCAGCGCGAAGGCAAGTATGCCCGCGACATGGATGAAGTCTTGCACGCCGAATCCGGGCACATGCCGGAGTTCGTCGAGCGGCCCGCCGACTACTGGGATGCTGCCGACCTGTATGAACGCGCCAATGGGCGGCTGTTCAAGGAGGTCGAATTTGCCCTGCCGGTCGAGCTGACCCTCGACCAGCAGAAGGCGCTGGCGTCCGAGTTCGCCCAGCACCTGACCGGTGCCGAGCGCCTGCCGTATACGCTGGCCATCCATGCCGGTGGCGGCGAGAACCCGCACTGCCACCTGATGATCTCCGAGCGGATCAATGACGGCATCGAGCGGCCCGCCGCTCAGTGGTTCAAGCGGTACAACGGCAAGACCCCGGAGAAGGGCGGGGCACAGAAGACCGAAGCGCTCAAGCCCAAGGCATGGCTTGAGCAGACCCGCGAGGCATGGGCCGACCATGCCAACCGGGCATTAGAGCGGGCTGGCCACGACGCCCGCATTGACCACAGAACACTTGAGGCGCAGGGCATCGAGCGCCTGCCCGGTGTTCACCTGGGGCCGAACGTGGTGGAGATGGAAGGCCGGGGCATCCGCACCGACCGGGCAGACGTGGCCCTGAACATCGACACCGCCAACGCCCAGATCATCGACTTACAGGAATACCGGGAGGCAATAGACCATGAACGCAATCGACAGAGTGAAGAAATCCAGAGGCATCAACGAGTTAGCGGAGCAGATCGAACCGCTGGCCCAGAGCATGGCGACACTGGCCGACGAAGCCCGGCAGGTCATGAGCCAGACCCAGCAGGCCAGCGAGGCGCAGGCGGCGGAGTGGCTGAAAGCCCAGCGCCAGACAGGGGCGGCATGGGTGGAGCTGGCCAAAGAGTTGCGGGAGGTAGCCGCCGAGGTGAGCAGCGCCGCGCAGAGCGCCCGGAGCGCGTCGCGGGGGTGGCACTGGAAGCTATGGCTAACCGTGATGCTGGCTTCCATGATGCCTACGGTGGTGCTGCTGATCGCATCGTTGCTCTTGCTCGACCTGACGCCACTGACAACCGAGGACGGCTCGATCTGGCTGCGCTTGGTGGCCCGATGAAGAACGACAGGACTTTGCAGGCCATAGGCCGACAGCTCAAGGCCATGGGCTGTGAGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCCCCTGACG
AGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAAAAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTGCAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAGTGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAACACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATCATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTTCAAGAATTCGAGCTCGGTACCGGATCCGTCGACCTGCAGCCAAGCTTAATTAGCTGAGCTTGGACTCCTGTTGATAGATCCAGTAATGACCTCAGAACTCCATCTGGATTTGTTCAGAACGCTCGGTTGCCGCCGGGCGTTTTTTATTGGTGAGAATCCAAGCTAGCTTGGCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGG
AGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTTCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTTTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGGGTAATGACTCTCTAGCTTGAGGCATCAAATAAAACGAAAGGCTCAGTCGAAAGACTGGGCCTTTCGTTTTATCTGTTGTTTGTCGGTGAACGCTCTCCTGAGTAGGACAAATCCGCCCTCTAGCAGCCCGGGCTGC'
insertSeq1 = 'TCACTTGACTTTTGATACGTTATTTTATAACGGTTAACATATTTATAAAAACAACGGCCGTGCCACACGTCCGTTTCAATACTTAACGCACATGTATTTTGGTTTAGTCATCATCCGGTTATATGTATTTTAGCCAGGAACAGGTTAAATCATTCCTATATAACTCAAAAATTGAAACCTTATTCTCATGTCATGCTTATATTCATTATTATCGTTATATAAAAAGGCAACCATAATGTTTAGCAAATTGGCACAAAGTAGCATAAAGGCTATGTTTTAATTACAGGATGTTCAGTCATTTGAATGTATAACATTATAGCTAAACAAATCTAAAACGAAGTCAATAATTTATTGCTTTCACAAAATCTCATTTTGTTTAACATCCATTGAGATTCCTTGCTTTAAATTTTATTTTATATAAGCCATCATTTTAATTAATTTATTTTTTTGAGGGGGGGGTAATATACTCATATGCAAAATCAAGAAATAAACATCCTAATGAACCATATTAAATACCGTGGGATAAGACATAACAA'
vectorSeq1X = 'CTAGATTTAAGAAGGAGATATACATATGAGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTGATGTTAATGGGCACAAATTTTCTGTCAGTG'
insertSeq1X = 'TCACTTGACTTTTGATACGTTATTTTATAACGGTTAACATATTTATAAAAACAACGGCCGTGCCACACGTCCGTTTCAATACTTAACGCACATGTATTTTGGTTTAGTCATCATCCGGTTATATGTATTTTAGCCAGGAACAGGTTAAATCATTCCTATATAACTCAAAAATTGAAACCTTATTCTCATGTCATGCTTATATTCATTATTATCGTTATATAAAAAGGCAACCATAATGTTTAGCAAATTGGCACAAAGTAGCATAAAGG'
testOutput1 = 'ggccgcTATTTCTCCTTTCGCGCAGTACGTGGTTCGCGGCTTAATCCTGCTGGCAGCGGTGATCTTCGACCGTTACAAGCAAAAAGCGAAACGCACTGTCTGATGCTTTTTTCTGCAACAATTTAGCGTTTTTTCCCACCATAGCCAACCGCCATAACGGTTGGCTGTTCTTCGTTGCAAATGGCGACCCCCGTCACACTGTCTATACTTACATGTCTGTAAAGCGCGTTCTGCGCAACACAATAAGAAAACTAGATTTAAGAAGGAGATATACATATGAGTAAAGGAGAAGAACTTTTCACTGGAGTTGTCCCAATTCTTGTTGAATTAGATGGTGATGTTAATGGGCACAAATTTTCTGTCAGTGGAGAGGGTGAAGGTGATGCTACATACGGAAAGCTTACCCTTAAATTTATTTGCACTACTGGAAAACTACCTGTTCCATGGCCAACACTTGTCACTACTTTGACCTATGGTGTTCAATGCTTTTCCCGTTATCCGGATCATATGAAACGGCATGACTTTTTCAAGAGTGCCATGCCCGAAGGTTATGTACAGGAACGCACTATATCTTTCAAAGATGACGGGAACTACAAGACGCGTGCTGAAGTCAAGTTTGAAGGTGATACCCTTGTTAATCGTATCGAGTTAAAAGGTATTGATTTTAAAGAAGATGGAAACATTCTCGGACACAAACTCGAGTACAACTATAACTCACACAATGTATACATCACGGCAGACAAACAAAAGAATGGAATCAAAGCTAACTTCAAAATTCGCCACAACATTGAAGATGGATCCGTTCAACTAGCAGACCATTATCAACAAAATACTCCAATTGGCGATGGCCCTGTCCTTTTACCAGACAACCATTACCTGTCGACACAATCTGCCCTTTCGAAAGATCCCAACGAAAAGCGTGACCACATGGTCCTTCTTGAGTTTGTAACTGCTGCTGGGATTACACATGGCATGGATGAGCTCTACAAATAATGAATTCCAGCTGAGCGCCGGTCGCTACCATTACCAGTTGGTCTGGTGTCAAAAATAATAATAACCGGGCAGGCCATGTCTGCCCGTATTTCGCGTAAGGAAATCCATTATGTACTATTTAATTCTTGAAGACGAAAGGGCCTCGTGATACGCCTATTTTTATAGGTTAATGTCATGATAATAATGGTTTCTTAGACGTCAGGTGGCGATATCGGGCTAGCCGGCCCGACGCACTTTGCGCCGAATAAATACCTGTGACGGAAGATCACTTCGCAGAATAAATAAATCCTGGTGTCCCTGTTGATACCGGGAAGCCCTGGGCCAACTTTTGGCGAAAATGAGACGTTGATCGGCACGTAAGAGGTTCCAACTTTCACCATAATGAAATAAGATCACTACCGGGCGTATTTTTTGAGTTATCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTCCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGAC
AAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTCTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGTGCTACGCCTGAATAAGTGATAATAAGCGGATGAATGGCAGAAATGACGGATATCGTCCATTCCGACAGCATCGCCAGTCACTATGGCGTGCTGCTAGCGCTTTTAGCCGCTTTAGCGGCCTTTCCCCCTACCCGAAGGGTGGGGGCGCGTGTGCAGCCCCGCAGGGCCTGTCTCGGTCGATCATTCAGCCCGGCTCATCCTTCTGGCGTGGCGGCAGACCGAACAAGGCGCGGTCGTGGTCGCGTTCAAGGTACGCATCCATTGCCGCCATGAGCCGATCCTCCGGCCACTCGCTGCTGTTCACCTTGGCCAAAATCATGGCCCCCACCAGCACCTTGCGCCTTGTTTCGTTCTTGCGCTCTTGCTGCTGTTCCCTTGCCCGCACCCGCTGAATTTCGGCATTGATTCGCGCTCGTTGTTCTTCGAGCTTGGCCAGCCGATCCGCCGCCTTGTTGCTCCCCTTAACCATCTTGACACCCCATTGTTAATGTGCTGTCTCGTAGGCTATCATGGAGGCACAGCGGCGGCAATCCCGACCCTACTTTGTAGGGGAGGGCGCACTTACCGGTTTCTCTTCGAGAAACTGGCCTAACGGCCACCCTTCGGGCGGTGCGCTCTCCGAGGGCCATTGCATGGAGCCGAAAAGCAAAAGCAACAGCGAGGCAGCATGGCGATTTATCACCTTACGGCGAAAACCGGCAGCAGGTCGGGCGGCCAATCGGCCAGGGCCAAGGCCGACTACATCCAGCGCGAAGGCAAGTATGCCCGCGACATGGATGAAGTCTTGCACGCCGAATCCGGGCACATGCCGGAGTTCGTCGAGCGGCCCGCCGACTACTGGGATGCTGCCGACCTGTATGAACGCGCCAATGGGCGGCTGTTCAAGGAGGTCGAATTTGCCCTGCCGGTCGAGCTGACCCTCGACCAGCAGAAGGCGCTGGCGTCCGAGTTCGCCCAGCACCTGACCGGTGCCGAGCGCCTGCCGTATACGCTGGCCATCCATGCCGGTGGCGGCGAGAACCCGCACTGCCACCTGATGATCTCCGAGCGGATCAATGACGGCATCGAGCGGCCCGCCGCTCAGTGGTTCAAGCGGTACAACGGCAAGACCCCGGAGAAGGGCGGGGCACAGAAGACCGAAGCGCTCAAGCCCAAGGCATGGCTTGAGCAGACCCGCGAGGCATGGGCCGACCATGCCAACCGGGCATTAGAGCGGGCTGGCCACGACGCCCGCATTGACCACAGAACACTTGAGGCGCAGGGCATCGAGCGCCTGCCCGGTGTTCACCTGGGGCCGAACGTGGTGGAGATGGAAGGCCGGGGCATCCGCACCGACCGGGCAGACGTGGCCCTGAACATCGACACCGCCAACGCCCAGATCATCGACTTACAGGAATACCGGGAGGCAATAGACCATGAACGCAATCGACAGAGTGAAGAAATCCAGAGGCATCAACGAGTTAGCGGAGCAGATCGAACCGCTGGCCCAGAGCATGGCGACACTGGCCGACGAAGCCCGGCAGGTCATGAGCCAGACCCAGCAGGCCAGCGAGGCGCAGGCGGCGGAGTGGCTGAAAGCCCAGCGCCAGACAGGGGCGGCATGGGTGGAGCTGGCCAAAGAGTTGCGGGAGGTAGCCGCCGAGGTGAGCAGCGCCGCGCAGAGCGCCCGGAGCGCGTCGCGGGGGTGGCACTGGAAGCTATGGCTAACCGTGATGCTGGCTTCCATGATGCCTACGGTGGTGCTGCTGATCGCATCGTTGCTCTTGCTCGACCTGACGCCACTGACAACCGAGGACGGCTCGATCTGGCTGCGCTTGGTGGCCCGATGAAGAACGACAGGACTT
TGCAGGCCATAGGCCGACAGCTCAAGGCCATGGGCTGTGAGCGCTCTTCCGCTTCCTCGCTCACTGACTCGCTGCGCTCGGTCGTTCGGCTGCGGCGAGCGGTATCAGCTCACTCAAAGGCGGTAATACGGTTATCCACAGAATCAGGGGATAACGCAGGAAAGAACATGTGAGCAAAAGGCCAGCAAAAGGCCAGGAACCGTAAAAAGGCCGCGTTGCTGGCGTTTTTCCATAGGCTCCGCCCCCCTGACGAGCATCACAAAAATCGACGCTCAAGTCAGAGGTGGCGAAACCCGACAGGACTATAAAGATACCAGGCGTTTCCCCCTGGAAGCTCCCTCGTGCGCTCTCCTGTTCCGACCCTGCCGCTTACCGGATACCTGTCCGCCTTTCTCCCTTCGGGAAGCGTGGCGCTTTCTCATAGCTCACGCTGTAGGTATCTCAGTTCGGTGTAGGTCGTTCGCTCCAAGCTGGGCTGTGTGCACGAACCCCCCGTTCAGCCCGACCGCTGCGCCTTATCCGGTAACTATCGTCTTGAGTCCAACCCGGTAAGACACGACTTATCGCCACTGGCAGCAGCCACTGGTAACAGGATTAGCAGAGCGAGGTATGTAGGCGGTGCTACAGAGTTCTTGAAGTGGTGGCCTAACTACGGCTACACTAGAAGGACAGTATTTGGTATCTGCGCTCTGCTGAAGCCAGTTACCTTCGGAAAAAGAGTTGGTAGCTCTTGATCCGGCAAACAAACCACCGCTGGTAGCGGTGGTTTTTTTGTTTGCAAGCAGCAGATTACGCGCAGAAAAAAAGGATCTCAAGAAGATCCTTTGATCTTTTCTACGGGGTCTGACGCTCAGTGGAACGAAAACTCACGTTAAGGGATTTTGGTCATGAGATTATCAAAAAGGATCTTCACCTAGATCCTTTTAAATTAAAAATGAAGTTTTAAATCAATCTAAAGTATATATGAGTAAACTTGGTCTGACAGTTACCAATGCTTAATCAGTGAGGCACCTATCTCAGCGATCTGTCTATTTCGTTCATCCATAGTTGCCTGACTCCCCGTCGTGTAGATAACTACGATACGGGAGGGCTTACCATCTGGCCCCAGTGCTGCAATGATACCGCGAGACCCACGCTCACCGGCTCCAGATTTATCAGCAATAAACCAGCCAGCCGGAAGGGCCGAGCGCAGAAGTGGTCCTGCAACTTTATCCGCCTCCATCCAGTCTATTAATTGTTGCCGGGAAGCTAGAGTAAGTAGTTCGCCAGTTAATAGTTTGCGCAACGTTGTTGCCATTGCTGCAGGCATCGTGGTGTCACGCTCGTCGTTTGGTATGGCTTCATTCAGCTCCGGTTCCCAACGATCAAGGCGAGTTACATGATCCCCCATGTTGTGCAAAAAAGCGGTTAGCTCCTTCGGTCCTCCGATCGTTGTCAGAAGTAAGTTGGCCGCAGTGTTATCACTCATGGTTATGGCAGCACTGCATAATTCTCTTACTGTCATGCCATCCGTAAGATGCTTTTCTGTGACTGGTGAGTACTCAACCAAGTCATTCTGAGAATAGTGTATGCGGCGACCGAGTTGCTCTTGCCCGGCGTCAACACGGGATAATACCGCGCCACATAGCAGAACTTTAAAAGTGCTCATCATTGGAAAACGTTCTTCGGGGCGAAAACTCTCAAGGATCTTACCGCTGTTGAGATCCAGTTCGATGTAACCCACTCGTGCACCCAACTGATCTTCAGCATCTTTTACTTTCACCAGCGTTTCTGGGTGAGCAAAAACAGGAAGGCAAAATGCCGCAAAAAAGGGAATAAGGGCGACACGGAAATGTTGAATACTCATACTCTTCCTTTTTCAATATTATTGAAGCATTTATCAGGGTTATTGTCTCATGAGCGGATACATATTTGAATGTATTTAGAAAAATAAACAAATAGGGGTTCCGCGCACATTTCCCCGAAAAGTGCCACCTGACGTCTAAGAAACCATTATTATC
ATGACATTAACCTATAAAAATAGGCGTATCACGAGGCCCTTTCGTCTTCAAGAATTCGAGCTCGGTACCGGATCCGTCGACCTGCAGCCAAGCTTAATTAGCTGAGCTTGGACTCCTGTTGATAGATCCAGTAATGACCTCAGAACTCCATCTGGATTTGTTCAGAACGCTCGGTTGCCGCCGGGCGTTTTTTATTGGTGAGAATCCAAGCTAGCTTGGCGAGATTTTCAGGAGCTAAGGAAGCTAAAATGGAGAAAAAAATCACTGGATATACCACCGTTGATATATCCCAATGGCATCGTAAAGAACATTTTGAGGCATTTCAGTCAGTTGCTCAATGTACCTATAACCAGACCGTTCAGCTGGATATTACGGCCTTTTTAAAGACCGTAAAGAAAAATAAGCACAAGTTTTATCCGGCCTTTATTCACATTCTTGCCCGCCTGATGAATGCTCATCCGGAATTTCGTATGGCAATGAAAGACGGTGAGCTGGTGATATGGGATAGTGTTCACCCTTGTTACACCGTTTTCCATGAGCAAACTGAAACGTTTTCATCGCTCTGGAGTGAATACCACGACGATTTCCGGCAGTTTCTACACATATATTCGCAAGATGTGGCGTGTTACGGTGAAAACCTGGCCTATTTCCCTAAAGGGTTTATTGAGAATATGTTTTTCGTCTCAGCCAATCCCTGGGTGAGTTTCACCAGTTTTGATTTAAACGTGGCCAATATGGACAACTTCTTCGCCCCCGTTTTCACCATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCAGGTTCATCATGCCGTTTGTGATGGCTTCCATGTCGGCAGAATGCTTAATGAATTACAACAGTACTGCGATGAGTGGCAGGGCGGGGCGTAATTTTTTTAAGGCAGTTATTGGTGCCCTTAAACGCCTGGGGTAATGACTCTCTAGCTTGAGGCATCAAATAAAACGAAAGGCTCAGTCGAAAGACTGGGCCTTTCGTTTTATCTGTTGTTTGTCGGTGAACGCTCTCCTGAGTAGGACAAATCCGCCCTCTAGCAGCCCGGGCTGCGAGAAGGAGGAGAACCGGgtgACAGAACCGTTAACCGAAACCCCTGAACTATCCGCGAAATATGCCTGGTTTTTTGATCTTGATGGAACGCTGGCGGAAATCAAACCGCATCCCGATCAGGTCGTCGTGCCTGACAATATTCTGCAAGGACTACAGCTACTGGCAACCGCAAGTGATGGTGCATTGGCATTGATATCAGGGCGCTCAATGGTGGAGCTTGACGCACTGGCAAAACCTTATCGCTTCCCGTt'
###################
### PARAMETERS ###
###################
# (copied from primer3 with subtle changes according to our first primer designed)
SEQUENCE_ID = 'MH1000'  # identifier handed to primer3 for every design
PRIMER_OPT_TM = 59.0    # target primer melting temperature (degrees C)
PRIMER_MIN_TM = 50.0    # lowest acceptable primer Tm (degrees C)
PRIMER_MAX_TM = 70.0    # highest acceptable primer Tm (degrees C)
# Acceptable PCR product size windows (base pairs).
PRIMER_PRODUCT_SIZE_RANGE = [[100, 300], [150, 250], [301, 400], [
    401, 500], [501, 600], [601, 700], [701, 850], [851, 1000]]
# Largest tolerated annealing-temperature difference within a primer pair (C).
MAX_TEMP_DIFF = 7.0
PRIMER_MIN_SIZE = 18  # minimum primer length in bases
# Nearest-neighbor thermodynamic parameters for each dinucleotide step:
# (delta H, delta S) used by primerTemp() to estimate melting temperature.
# NOTE: the original literal repeated the 'AA' and 'CA' keys with identical
# values; Python keeps only the last occurrence, so removing the duplicates
# leaves the resulting dict unchanged.
enthalpyEntropyValuesSequencePairs = {
    'AA': (-7.9, -22.2),
    'AT': (-7.2, -20.4),
    'TA': (-7.2, -21.3),
    'CA': (-8.5, -22.7),
    'GT': (-8.4, -22.4),
    'CT': (-7.8, -21.0),
    'GA': (-8.2, -22.2),
    'CG': (-10.6, -27.2),
    'GC': (-9.8, -24.4),
    'GG': (-8.0, -19.9),
    'TT': (-7.9, -22.2),
    'CC': (-8.0, -19.9),
    'TG': (-8.5, -22.7),
    'AC': (-8.4, -22.4),
    'AG': (-7.8, -21.0),
    'TC': (-8.2, -22.2),
}
###################
### WEBSCRAPING ###
###################
# CHANGE
def primerDictToNEBPrimerSeq(primerDict):
    """Serialize a primer3/fastCloning primer dict into the semicolon-separated
    batch format accepted by the NEB Tm calculator (one pair per line)."""
    batchLines = []
    for pairName, pairInfo in primerDict.items():
        leftSeq = pairInfo[0][2]
        rightSeq = pairInfo[1][2]
        batchLines.append("{0}Left; {1}; {0}Right; {2}\n".format(
            str(pairName), leftSeq, rightSeq))
    return "".join(batchLines)
def NEBWebscraper(primersSeq, phusionprimerOptTm):
    """Use NEB to check the melting temperature and annealing temperature of all primers.

    Drives the NEB Tm calculator batch page with Selenium (Chrome), submits
    `primersSeq` (the format produced by primerDictToNEBPrimerSeq), and keeps
    only primer pairs whose NEB-reported annealing temperatures both fall
    within +/- 5 degrees C of `phusionprimerOptTm`.

    Returns:
        (NEBprimerDict, farthestTempDist): surviving pairs in the same shape
        as the primer3 pair dict, plus the largest distance (in C) between a
        kept pair's annealing temperature and the target.

    NOTE(review): uses the Selenium 3 `find_element_by_*` API and a
    hard-coded XPath into the NEB page — both break under Selenium 4 or a
    page redesign.  Expects a `chromedriver` binary in the working directory.
    """
    # open the tm calculator headlessly
    options = webdriver.chrome.options.Options()
    # options.headless = True
    cwd = os.getcwd() + '/chromedriver'
    driver = webdriver.Chrome(options=options, executable_path=cwd)
    driver.get("https://tmcalculator.neb.com/#!/batch")
    time.sleep(1)
    # set the enzyme to phusion ("P\n" selects the first entry starting with P)
    driver.find_element_by_xpath(
        "/html/body/div[3]/div[2]/div/div/div/div[2]/div[1]/form/div/div[1]/div/select[1]").send_keys("P\n")
    time.sleep(1)
    # set the primer input
    driver.find_element_by_id("batchinput").send_keys(
        primersSeq)
    # set the primer concentration
    driver.find_element_by_id("ct").clear()
    driver.find_element_by_id("ct").send_keys(100)
    # blur the focus to produce outputs
    driver.execute_script("document.getElementById('batchinput').blur()")
    # fetch the result table
    rows = driver.find_elements_by_css_selector(
        "table.batchresultstablex>tbody>tr")
    table = [[col.get_attribute("innerHTML").splitlines(
    ) for col in row.find_elements_by_css_selector("td")] for row in rows]
    # turn into a dictionary for easier manipulation; result rows come in
    # left/right pairs, so step through them two at a time
    NEBprimerDict = {}
    farthestTempDist = 0
    for primerIndex in range(len(table)):
        if primerIndex % 2 == 0:
            # left primer
            Lprimer = table[primerIndex]
            currentLPrimerName = Lprimer[0][0]
            # [1:] drops the first character of the sequence cell —
            # presumably page markup; verify against the live table.
            currentLPrimerSeq = Lprimer[1][0][1:]
            currentLPrimerTm = Lprimer[2][0]
            currentLPrimerTa = float(Lprimer[3][0])
            # right primers
            Rprimer = table[primerIndex+1]
            currentRPrimerName = Rprimer[0][0]
            currentRPrimerSeq = Rprimer[1][0][1:]
            currentRPrimerTm = Rprimer[2][0]
            currentRPrimerTa = float(Rprimer[3][0])
            # primer pair name: strip the 4-char 'Left' suffix added by
            # primerDictToNEBPrimerSeq
            primerPairName = currentLPrimerName[:-4]
            # keep the pair only when both annealing temps are within +/- 5 C
            phusionPrimerLowerBound = float(phusionprimerOptTm)-5
            phusionPrimerUpperBound = float(phusionprimerOptTm)+5
            if (currentLPrimerTa >= phusionPrimerLowerBound) and (currentLPrimerTa <= phusionPrimerUpperBound):
                if (currentRPrimerTa >= phusionPrimerLowerBound) and (currentRPrimerTa <= phusionPrimerUpperBound):
                    currentfarthestTempDist = max(abs(
                        currentLPrimerTa-phusionprimerOptTm), abs(currentRPrimerTa-phusionprimerOptTm))
                    NEBprimerDict.update(
                        {primerPairName: [['left', currentLPrimerTa, currentLPrimerSeq], ['right', currentRPrimerTa, currentRPrimerSeq]]})
                    # track the worst (largest) distance among kept pairs
                    if farthestTempDist < currentfarthestTempDist:
                        farthestTempDist = currentfarthestTempDist
    time.sleep(5)
    # close the chrome driver
    driver.close()
    return NEBprimerDict, farthestTempDist
###########################
### SEQUENCE PROCESSING ###
###########################
def fileParsing(vectorPlasmidAddress, insertPlasmidAddress):
    """Read the vector and insert plasmid files into Biopython Seq objects.

    The file format is inferred from the VECTOR file's name (as in the
    original code, both files are assumed to share that format): FASTA when
    the path ends in 'fasta', GenBank when it ends in '.gb' or 'gbk'.  Any
    other extension aborts the program.

    Args:
        vectorPlasmidAddress (str): path to the vector plasmid file
        insertPlasmidAddress (str): path to the insert plasmid file
    Returns:
        tuple: (vectorPlasmidSeq, insertPlasmidSeq)
    """
    if vectorPlasmidAddress.endswith('fasta'):
        fileFormat = 'fasta'
    elif vectorPlasmidAddress.endswith(('.gb', 'gbk')):
        fileFormat = 'genbank'
    else:
        # Original had an unreachable `return` after this exit; removed.
        sys.exit('Unsupported file format.')
    vectorPlasmidSeq = SeqIO.read(vectorPlasmidAddress, fileFormat).seq
    insertPlasmidSeq = SeqIO.read(insertPlasmidAddress, fileFormat).seq
    return vectorPlasmidSeq, insertPlasmidSeq
def pseudoCircularizePlasmid(plasmidSeq, goalSeq):
    """Reorder (pseudo-circularize) plasmidSeq so the result represents the
    same circular plasmid but contains goalSeq as one contiguous run.

    Two usage scenarios:
      (1) plasmidSeq = vectorPlasmidSeq; goalSeq = insertPlasmidSeq
      (2) plasmidSeq = vectorSeq;        goalSeq = insertSeq
    The non-vector section is assumed to be longer than 2*17 = 34 bases.

    Returns:
        (output, outputStart, outputEnd): the reordered sequence ready for
        primer3, plus the start/end indexes of goalSeq within it.
    """
    # 1. Split goalSeq into a prefix and suffix that each occur verbatim in
    #    the linearized plasmid (goalSeq may wrap around the plasmid origin).
    prefix = ''
    suffix = ''
    for cut in range(len(goalSeq)):
        head = goalSeq[:cut]
        tail = goalSeq[cut:]
        if head in plasmidSeq and tail in plasmidSeq:
            prefix, suffix = head, tail
            break
    # 2. Locate both pieces inside the plasmid.
    prefixStart = plasmidSeq.find(prefix)
    prefixEnd = prefixStart + len(prefix)
    suffixStart = plasmidSeq.find(suffix)
    suffixEnd = suffixStart + len(suffix)
    # 3. Rebuild the plasmid with goalSeq made contiguous.
    if prefixEnd == len(plasmidSeq):
        # 3.1 goalSeq wraps: its prefix sits at the very end of the plasmid.
        remainder = plasmidSeq[suffixEnd:prefixStart]
        mid = len(remainder) // 2
        output = remainder[mid:] + prefix + suffix + remainder[:mid]
    elif suffixEnd == len(plasmidSeq):
        # 3.2 goalSeq wraps the other way: its suffix ends the plasmid.
        remainder = plasmidSeq[prefixEnd:suffixStart]
        mid = len(remainder) // 2
        output = remainder[:mid] + suffix + prefix + remainder[mid:]
    else:
        # 3.3 goalSeq is already contiguous inside the plasmid.
        output = plasmidSeq
    outputStart = output.find(goalSeq)
    outputEnd = outputStart + len(goalSeq)
    return output, outputStart, outputEnd
def primer3ShortCut(seq, goalStart, goalEnd, primerOptTm=PRIMER_OPT_TM, primerMinTm=PRIMER_MIN_TM, primerMaxTm=PRIMER_MAX_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Run primer3 twice on the pseudo-circularized sequence — once targeting
    the start of the goal region, once its end — and return both raw result
    dicts as (leftResult, rightResult)."""
    # Target at most 100 bases at either end of the goal region.
    targetLen = min(goalEnd - goalStart, 100)
    paramMap = {
        'PRIMER_OPT_TM': primerOptTm,
        'PRIMER_MIN_TM': primerMinTm,
        'PRIMER_MAX_TM': primerMaxTm,
        'PRIMER_MIN_SIZE': primerMinSize,
    }
    leftResult = primer3.bindings.designPrimers({
        'SEQUENCE_ID': SEQUENCE_ID,
        'SEQUENCE_TEMPLATE': seq,
        'SEQUENCE_TARGET': [goalStart, targetLen],
    }, dict(paramMap))
    rightResult = primer3.bindings.designPrimers({
        'SEQUENCE_ID': SEQUENCE_ID,
        'SEQUENCE_TEMPLATE': seq,
        'SEQUENCE_TARGET': [goalEnd - targetLen, targetLen],
    }, dict(paramMap))
    return leftResult, rightResult
def plasmidPrimerDesign(plasmidSeq, goalSeq, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Uses the primer3-py api to find the primer info for isolating the
    current goalSeq from the plasmidSeq.

    Returns:
        (leftPrimerInfo, rightPrimerInfo): the raw primer3 result dicts.
    """
    preppedPlasmidSeq, goalSeqStart, goalSeqEnd = pseudoCircularizePlasmid(
        plasmidSeq, goalSeq)
    # BUGFIX: the original passed primerMinSize positionally, so it landed in
    # primer3ShortCut's primerMinTm slot (forcing the minimum Tm to 18).
    # Keyword arguments route both values to the intended parameters.
    return primer3ShortCut(preppedPlasmidSeq, goalSeqStart, goalSeqEnd,
                           primerOptTm=primerOptTm,
                           primerMinSize=primerMinSize)
def cleanPrimerInfo(leftPrimerInfo, rightPrimerInfo):
    """Reshape raw primer3 result dicts into {'primerPairN': [left, right]}.

    Each left/right entry is ['leftPrimerN'|'rightPrimerN', meltingTemp,
    sequence].  Only primers with index 0-2 on each side are kept, and every
    left primer is paired with every right primer.

    BUGFIX: the original parsed the primer index from a fixed character
    position (key[12]/key[13]), which misreads two-digit indexes such as
    PRIMER_LEFT_10 as 1 and then looks up the wrong _TM key.  The index is
    now taken from the underscore-delimited key structure.
    """
    def _collect(infoDict, side):
        # Gather [label, Tm, sequence] triplets for one side
        # (side is 'LEFT' or 'RIGHT'); keys look like PRIMER_LEFT_0_SEQUENCE.
        prefix = 'PRIMER_' + side + '_'
        collected = []
        for key, sequence in infoDict.items():
            if key.startswith(prefix) and key.endswith('_SEQUENCE'):
                indexToken = key.split('_')[2]
                if indexToken.isdigit() and int(indexToken) <= 2:
                    primerTM = infoDict[prefix + indexToken + '_TM']
                    collected.append(
                        [side.lower() + 'Primer' + indexToken, primerTM,
                         sequence])
        return collected

    leftPrimerL = _collect(leftPrimerInfo, 'LEFT')
    rightPrimerL = _collect(rightPrimerInfo, 'RIGHT')
    # Cross every left primer with every right primer, renumbering per pair.
    primerPairDict = {}
    primerPairNum = 0
    for leftPrimer in leftPrimerL:
        for rightPrimer in rightPrimerL:
            primerPairNum += 1
            # Fresh lists (instead of deepcopy) so each pair owns its entries.
            primerPairDict['primerPair' + str(primerPairNum)] = [
                ['leftPrimer' + str(primerPairNum),
                 leftPrimer[1], leftPrimer[2]],
                ['rightPrimer' + str(primerPairNum),
                 rightPrimer[1], rightPrimer[2]],
            ]
    return primerPairDict
def primer3Only(plasmidSeq, goalSeq, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Quick wrapper for plain (non-fastCloning) primer design: run primer3
    over the pseudo-circularized plasmid and return the cleaned pair dict."""
    rawLeft, rawRight = plasmidPrimerDesign(
        plasmidSeq, goalSeq, primerOptTm, primerMinSize)
    # Progress banner (cosmetic only).
    for bannerLine in (' _________\n / \\\n | /\\ /\\ |\n | - |\n | \\___/ |\n \\_________/',
                       'PROCESSING',
                       'author: Tom Fu, Richard Chang; HMC BioMakerspace'):
        print(bannerLine)
    return cleanPrimerInfo(rawLeft, rawRight)
def tempDiffRestrict(primerInfo, maxTempDiff=MAX_TEMP_DIFF):
    """Drop primer pairs whose two temperatures differ by more than
    `maxTempDiff` degrees C.

    Mutates `primerInfo` in place and returns it for convenience.
    """
    for pairName in list(primerInfo):
        leftEntry, rightEntry = primerInfo[pairName]
        if abs(leftEntry[1] - rightEntry[1]) > maxTempDiff:
            del primerInfo[pairName]
    return primerInfo
def TaqvectorPrimerDesign(vectorPlasmidSeq, vectorSeq, maxTempDiff=MAX_TEMP_DIFF, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Find the primers isolating vectorSeq from vectorPlasmidSeq (Taq
    workflow); meanwhile record the two overhang sequences that must later be
    attached to the insert primer pairs.

    Returns:
        dict: the temperature-filtered primer pair dict where each left/right
        entry has its 16-base overhang appended as a fourth element.
    """
    cleanedPrimerInfo = primer3Only(
        vectorPlasmidSeq, vectorSeq, primerOptTm, primerMinSize)
    rightTempPrimerInfo = tempDiffRestrict(cleanedPrimerInfo, maxTempDiff)
    for key, val in rightTempPrimerInfo.copy().items():
        currentLeftPrimer = val[0][2]
        currentRightPrimer = val[1][2]
        if (len(currentLeftPrimer) >= 18) and (len(currentRightPrimer) >= 18):
            # The first 16 bases of each primer become the FastCloning overhang.
            val[0].append(currentLeftPrimer[:16])
            val[1].append(currentRightPrimer[:16])
        else:
            # BUGFIX: sys.exit() accepts a single argument; the original
            # passed two, which raised TypeError instead of exiting with the
            # intended message.
            sys.exit('The following primer pair is not long enough for '
                     'FastCloning, thus removed: ' + str(val))
    return rightTempPrimerInfo
def TaqinsertPrimerDesign(rightTempVectorPrimerInfoWOverhang, insertPlasmidSeq, insertSeq, maxTempDiff=MAX_TEMP_DIFF, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Find the primers isolating insertSeq from insertPlasmidSeq; meanwhile attaching
    the two overhang sequences to the insert primer pairs.

    Args:
        rightTempVectorPrimerInfoWOverhang: output of TaqvectorPrimerDesign —
            each left/right entry carries its 16-base overhang at index 3.
    Returns:
        (outputDict, outputL): the four primers per vector/insert combination,
        keyed by name in a dict and flattened to [name, temp, sequence] rows.
    """
    cleanedInsertPrimerInfo = primer3Only(
        insertPlasmidSeq, insertSeq, primerOptTm, primerMinSize)
    rightTempInsertPrimerInfo = tempDiffRestrict(
        cleanedInsertPrimerInfo, maxTempDiff)
    outputDict = {}
    outputL = []
    primer4Num = 1
    # cross every vector primer pair with every insert primer pair
    for vkey, currentVPrimerPair in rightTempVectorPrimerInfoWOverhang.items():
        for ikey, currentIPrimerPair in rightTempInsertPrimerInfo.items():
            # vector primers
            vcurrentLSeq = currentVPrimerPair[0][2]
            vcurrentLTemp = currentVPrimerPair[0][1]
            vcurrentLOverhang = currentVPrimerPair[0][3]
            vcurrentRSeq = currentVPrimerPair[1][2]
            vcurrentRTemp = currentVPrimerPair[1][1]
            vcurrentROverhang = currentVPrimerPair[1][3]
            # insert primers
            icurrentLSeq = currentIPrimerPair[0][2]
            icurrentLTemp = currentIPrimerPair[0][1]
            icurrentRSeq = currentIPrimerPair[1][2]
            icurrentRTemp = currentIPrimerPair[1][1]
            # attach the left overhang to right iprimers and vice versa
            # (lower case marks the overhang portion of the final sequence)
            newiCurrentLSeq = vcurrentROverhang.lower() + icurrentLSeq
            newiCurrentRSeq = vcurrentLOverhang.lower() + icurrentRSeq
            # save current info
            outputDict.update(
                {('vectorLeftPrimer' + str(primer4Num)): [vcurrentLTemp, vcurrentLSeq]})
            outputDict.update(
                {('vectorRightPrimer' + str(primer4Num)): [vcurrentRTemp, vcurrentRSeq]})
            outputDict.update(
                {('insertLeftPrimer' + str(primer4Num)): [icurrentLTemp, newiCurrentLSeq]})
            outputDict.update(
                {('insertRightPrimer' + str(primer4Num)): [icurrentRTemp, newiCurrentRSeq]})
            outputL.append(
                [('vectorLeftPrimer' + str(primer4Num)), vcurrentLTemp, vcurrentLSeq])
            outputL.append(
                [('vectorRightPrimer' + str(primer4Num)), vcurrentRTemp, vcurrentRSeq])
            outputL.append(
                [('insertLeftPrimer' + str(primer4Num)), icurrentLTemp, newiCurrentLSeq])
            outputL.append(
                [('insertRightPrimer' + str(primer4Num)), icurrentRTemp, newiCurrentRSeq])
            primer4Num += 1
    return outputDict, outputL
def primerTemp(primerSeq, primerConcentration=500e-9, saltConcentration=50e-3, magnesiumConcentration=0):
    """Estimate a primer's melting temperature (degrees C) with the
    nearest-neighbor model used by the NEB calculator formula.

    NOTE(review): the salt correction uses math.log10; SantaLucia-style
    corrections are usually written with the natural log — confirm against
    the actual NEB formula before relying on absolute values.
    """
    INITIATION_DH, INITIATION_DS = 0.2, -5.7
    SYMMETRY_DH, SYMMETRY_DS = 0, -1.4
    TERMINAL_AT_DH, TERMINAL_AT_DS = 2.2, 6.9
    GAS_CONSTANT = 1.9872  # cal/(mol*K)
    dH = INITIATION_DH
    dS = INITIATION_DS
    # Self-complementary primers take a symmetry penalty and factor 1;
    # all others use factor 4 in the concentration term.
    if primerSeq == Seq(primerSeq).reverse_complement():
        dH += SYMMETRY_DH
        dS += SYMMETRY_DS
        symmetryFactor = 1
    else:
        symmetryFactor = 4
    # Terminal A/T penalty on the last base.
    if primerSeq[-1] in ('A', 'T'):
        dH += TERMINAL_AT_DH
        dS += TERMINAL_AT_DS
    saltEffect = saltConcentration + (magnesiumConcentration * 140)
    dS += 0.368 * (len(primerSeq) - 1) * math.log10(saltEffect)
    # Sum nearest-neighbor contributions over every dinucleotide step.
    for i in range(len(primerSeq) - 1):
        stepDH, stepDS = enthalpyEntropyValuesSequencePairs[primerSeq[i:i + 2]]
        dH += stepDH
        dS += stepDS
    return dH * 1000 / (dS + GAS_CONSTANT * math.log(primerConcentration / symmetryFactor)) - 273.15
def vectorPrimerDesign(vectorPlasmidSeq, vectorSeq, maxTempDiff=MAX_TEMP_DIFF, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Find primers isolating vectorSeq from vectorPlasmidSeq (phusion
    workflow), validating annealing temperatures with the NEB calculator.

    Scans primer3 optimal-Tm offsets in [-5, 5) and keeps the candidate set
    whose NEB-reported annealing temperatures sit closest to primerOptTm (or
    that yields more pairs).  Each surviving left/right primer then gets its
    16-base FastCloning overhang appended as a fourth element.

    Returns:
        dict: the winning primer pair dict with overhangs attached.
    """
    currentLen = 0
    rightTempPrimerInfo = {}
    bestFarthestTempDist = float("inf")
    for value in range(-5, 5):
        print("VECTOR")
        print(value)
        cleanedPrimerInfo = primer3Only(
            vectorPlasmidSeq, vectorSeq, primerOptTm + value, primerMinSize)
        temprightTempPrimerInfo = tempDiffRestrict(
            cleanedPrimerInfo, maxTempDiff)
        # re-check annealing temperatures with the NEB web calculator
        primerSeqNEB = primerDictToNEBPrimerSeq(temprightTempPrimerInfo)
        temprightTempPrimerInfo, currentfarthestTempDist = NEBWebscraper(
            primerSeqNEB, primerOptTm)
        if temprightTempPrimerInfo != {}:
            # prefer candidates closer to the target Tm, or a larger set
            if bestFarthestTempDist > currentfarthestTempDist or len(temprightTempPrimerInfo) > currentLen:
                bestFarthestTempDist = currentfarthestTempDist
                print(bestFarthestTempDist)
                rightTempPrimerInfo = temprightTempPrimerInfo
                currentLen = len(rightTempPrimerInfo)
                print("UPDATE")
                print(rightTempPrimerInfo)
    # attach the 16-base overhangs to every surviving pair
    rightTempPrimerInfoOverhang = rightTempPrimerInfo.copy()
    for key, val in rightTempPrimerInfo.items():
        currentLeftPrimer = val[0][2]
        currentRightPrimer = val[1][2]
        if (len(currentLeftPrimer) >= 18) and (len(currentRightPrimer) >= 18):
            val[0].append(currentLeftPrimer[:16])
            val[1].append(currentRightPrimer[:16])
        else:
            # BUGFIX: sys.exit() takes one argument; the original passed two,
            # which raised TypeError instead of exiting with the message.
            sys.exit('The following primer pair is not long enough for '
                     'FastCloning, thus removed: ' + str(val))
    print(rightTempPrimerInfoOverhang)
    return rightTempPrimerInfoOverhang
def insertPrimerDesign(rightTempVectorPrimerInfoWOverhang, insertPlasmidSeq, insertSeq, maxTempDiff=MAX_TEMP_DIFF, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE):
    """Find the primers isolating insertSeq from insertPlasmidSeq; meanwhile attaching
    the two overhang sequences to the insert primer pairs.

    Phusion workflow: candidate insert primers are generated across primer3
    optimal-Tm offsets in [-5, 5) and validated with the NEB annealing
    calculator; the candidate set closest to primerOptTm (or with more
    pairs) wins.  Each insert primer then receives the matching vector
    overhang (lower-cased) as a 5' extension.

    Args:
        rightTempVectorPrimerInfoWOverhang: output of vectorPrimerDesign —
            each left/right entry carries its 16-base overhang at index 3.
    Returns:
        (outputDict, outputL): the four primers per vector/insert combination,
        keyed by name in a dict and flattened to [name, temp, sequence] rows.
    """
    currentLen = 0
    rightTempInsertPrimerInfo = {}
    bestFarthestTempDist = float("inf")
    for value in range(-5, 5):
        print("INSERT")
        print(value)
        cleanedPrimerInfo = primer3Only(
            insertPlasmidSeq, insertSeq, primerOptTm+value, primerMinSize)
        temprightTempPrimerInfo = tempDiffRestrict(
            cleanedPrimerInfo, maxTempDiff)
        # check phusion for temperature
        primerSeqNEB = primerDictToNEBPrimerSeq(
            temprightTempPrimerInfo)
        temprightTempPrimerInfo, currentfarthestTempDist = NEBWebscraper(
            primerSeqNEB, primerOptTm)
        if temprightTempPrimerInfo != {}:
            # keep the candidate set closest to the target Tm, or the larger one
            if bestFarthestTempDist > currentfarthestTempDist or len(temprightTempPrimerInfo) > currentLen:
                bestFarthestTempDist = currentfarthestTempDist
                print(bestFarthestTempDist)
                rightTempInsertPrimerInfo = temprightTempPrimerInfo
                currentLen = len(rightTempInsertPrimerInfo)
                print("UPDATE")
                print(rightTempInsertPrimerInfo)
    # cross every vector primer pair with every insert primer pair
    outputDict = {}
    outputL = []
    primer4Num = 1
    for vkey, currentVPrimerPair in rightTempVectorPrimerInfoWOverhang.items():
        for ikey, currentIPrimerPair in rightTempInsertPrimerInfo.items():
            # vector primers
            vcurrentLSeq = currentVPrimerPair[0][2]
            vcurrentLTemp = currentVPrimerPair[0][1]
            vcurrentLOverhang = currentVPrimerPair[0][3]
            vcurrentRSeq = currentVPrimerPair[1][2]
            vcurrentRTemp = currentVPrimerPair[1][1]
            vcurrentROverhang = currentVPrimerPair[1][3]
            # insert primers
            icurrentLSeq = currentIPrimerPair[0][2]
            icurrentLTemp = currentIPrimerPair[0][1]
            icurrentRSeq = currentIPrimerPair[1][2]
            icurrentRTemp = currentIPrimerPair[1][1]
            # attach the left overhang to right iprimers and vice versa
            # (lower case marks the overhang portion of the final sequence)
            newiCurrentLSeq = vcurrentROverhang.lower() + icurrentLSeq
            newiCurrentRSeq = vcurrentLOverhang.lower() + icurrentRSeq
            # save current info
            outputDict.update(
                {('vectorLeftPrimer' + str(primer4Num)): [vcurrentLTemp, vcurrentLSeq]})
            outputDict.update(
                {('vectorRightPrimer' + str(primer4Num)): [vcurrentRTemp, vcurrentRSeq]})
            outputDict.update(
                {('insertLeftPrimer' + str(primer4Num)): [icurrentLTemp, newiCurrentLSeq]})
            outputDict.update(
                {('insertRightPrimer' + str(primer4Num)): [icurrentRTemp, newiCurrentRSeq]})
            outputL.append(
                [('vectorLeftPrimer' + str(primer4Num)), vcurrentLTemp, vcurrentLSeq])
            outputL.append(
                [('vectorRightPrimer' + str(primer4Num)), vcurrentRTemp, vcurrentRSeq])
            outputL.append(
                [('insertLeftPrimer' + str(primer4Num)), icurrentLTemp, newiCurrentLSeq])
            outputL.append(
                [('insertRightPrimer' + str(primer4Num)), icurrentRTemp, newiCurrentRSeq])
            primer4Num += 1
    return outputDict, outputL
# WRAPPER FUNCTIONS
def plasmidPrimers(plasmidSeq, goalSeq, benchling=True, destinationAddress='plasmidPrimerInfo.csv', benchlingAddress='benchlingPlasmidPrimerInfo.csv', primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE, enzyme="Taq", maxTempDiff=MAX_TEMP_DIFF):
    """Design primer pairs isolating goalSeq from plasmidSeq and write a CSV.

    enzyme "Taq": primer3 melting temperatures are reported directly.
    enzyme "phusion": candidates are additionally validated against the NEB
    annealing-temperature calculator across optimal-Tm offsets in [-5, 5).
    When `benchling` is true, a two-column (name, sequence) CSV ready for
    Benchling import is also written to `benchlingAddress`.
    """
    if enzyme == "Taq":
        primersDict = primer3Only(
            plasmidSeq, goalSeq, primerOptTm, primerMinSize)
        tempString = "meltingTemp (in degree C)"
    elif enzyme == "phusion":
        tempString = 'annealingTemp (in degree C)'
        currentLen = 0
        primersDict = {}
        bestFarthestTempDist = float("inf")
        for value in range(-5, 5):
            cleanedPrimerInfo = primer3Only(
                plasmidSeq, goalSeq, primerOptTm + value, primerMinSize)
            temprightTempPrimerInfo = tempDiffRestrict(
                cleanedPrimerInfo, maxTempDiff)
            # check phusion annealing temperatures with the NEB calculator
            primerSeqNEB = primerDictToNEBPrimerSeq(temprightTempPrimerInfo)
            print(primerSeqNEB)
            temprightTempPrimerInfo, currentfarthestTempDist = NEBWebscraper(
                primerSeqNEB, primerOptTm)
            print(currentfarthestTempDist)
            if temprightTempPrimerInfo != {}:
                # keep the candidate set closest to the target Tm, or larger
                if bestFarthestTempDist > currentfarthestTempDist or len(temprightTempPrimerInfo) > currentLen:
                    bestFarthestTempDist = currentfarthestTempDist
                    print(bestFarthestTempDist)
                    currentLen = len(temprightTempPrimerInfo)
                    primersDict = temprightTempPrimerInfo
                    print(temprightTempPrimerInfo)
    else:
        # BUGFIX: the original fell through with primersDict/tempString
        # undefined (NameError) for any other enzyme value.
        sys.exit('Unsupported enzyme: ' + str(enzyme))
    print("FinalPrimersDict")
    print(primersDict)
    # flatten the pair dict into CSV rows
    outputL = []
    primerPairNum = 1
    for key, currentPrimerPair in primersDict.items():
        outputL.append([('leftPrimer' + str(primerPairNum)),
                        currentPrimerPair[0][1], currentPrimerPair[0][2]])
        outputL.append([('rightPrimer' + str(primerPairNum)),
                        currentPrimerPair[1][1], currentPrimerPair[1][2]])
        primerPairNum += 1
    currentDF = pd.DataFrame(
        outputL, columns=['primerInfo', tempString, 'sequence'])
    currentDF.to_csv(destinationAddress)
    print("Check out the following file for your primers:")
    print(destinationAddress)
    if benchling == True:
        benchlingL = [[currentPrimer[0], currentPrimer[2]]
                      for currentPrimer in outputL]
        benchlingDF = pd.DataFrame(benchlingL)
        benchlingDF.to_csv(benchlingAddress, index=False)
        print("Your benchling-ready csv file is:")
        # BUGFIX: report the file actually written (benchlingAddress); the
        # original printed 'benchling' + destinationAddress, which need not
        # match the real output path.
        print(benchlingAddress)
    return
def plasmidPrimersFile(plasmidSeqFile, goalSeq, benchling=True, destinationAddress='plasmidPrimerInfo.csv', benchlingAddress='benchlingPlasmidPrimerInfo.csv', primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE, enzyme="Taq"):
    """File-based wrapper around plasmidPrimers: load the plasmid sequence
    from a FASTA ('fasta') or GenBank ('.gb'/'gbk') file, then delegate."""
    if plasmidSeqFile.endswith('fasta'):
        plasmidSeq = str(SeqIO.read(plasmidSeqFile, "fasta").seq)
    elif plasmidSeqFile.endswith('.gb') or plasmidSeqFile.endswith('gbk'):
        plasmidSeq = str(SeqIO.read(plasmidSeqFile, "genbank").seq)
    else:
        sys.exit('Unsupported file format.')
    return plasmidPrimers(plasmidSeq, goalSeq, benchling, destinationAddress, benchlingAddress, primerOptTm, primerMinSize, enzyme)
def fastCloningPrimers(vectorPlasmidSeq, insertPlasmidSeq, vectorSeq, insertSeq, maxTempDiff=MAX_TEMP_DIFF, destinationAddress='fastCloningPrimerInfo.csv', benchlingAddress='benchlingfastCloningPrimerInfo.csv', benchling=True, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE, enzyme="phusion"):
    """Wrapper function that generates 2 primer pairs for the given circular
    raw vector and insert sequences

    Args:
        vectorPlasmidSeq ([str]): vector plasmid
        insertPlasmidSeq ([str]): insert plasmid
        vectorSeq ([str]): vector sequence
        insertSeq ([str]): insert sequence
        maxTempDiff: maximum allowed melting-temperature difference
        destinationAddress ([str]): csv path for the full primer report
        benchlingAddress ([str]): csv path for the benchling-ready csv
        benchling ([bool]): also write the benchling-ready csv when True
        primerOptTm / primerMinSize: forwarded to the design routines
        enzyme ([str]): "phusion" or "Taq"; selects the design routines
    """
    # The two original branches were byte-identical except for which pair
    # of design functions they called; dispatch on the enzyme instead.
    designFns = {
        "phusion": (vectorPrimerDesign, insertPrimerDesign),
        "Taq": (TaqvectorPrimerDesign, TaqinsertPrimerDesign),
    }
    if enzyme not in designFns:
        # Original silently produced no output for unknown enzymes; keep
        # that return-None contract but tell the user why nothing happened.
        print("Unsupported enzyme: " + str(enzyme))
        return
    vectorDesign, insertDesign = designFns[enzyme]
    rightTempVectorPrimerInfoWOverhang = vectorDesign(
        vectorPlasmidSeq, vectorSeq, maxTempDiff, primerOptTm, primerMinSize)
    outputDict, outputL = insertDesign(
        rightTempVectorPrimerInfoWOverhang, insertPlasmidSeq, insertSeq,
        maxTempDiff, primerOptTm, primerMinSize)
    currentDF = pd.DataFrame(
        outputL, columns=['primerInfo', 'annealingTemp (in degree C)', 'sequence'])
    currentDF.to_csv(destinationAddress)
    print("Check out the following file for your primers:")
    print(destinationAddress)
    if benchling == True:
        # Benchling import only needs the (name, sequence) columns.
        benchlingL = [[currentPrimer[0], currentPrimer[2]]
                      for currentPrimer in outputL]
        benchlingDF = pd.DataFrame(benchlingL)
        benchlingDF.to_csv(benchlingAddress, index=False)
        print("Your benchling-ready csv file is:")
        print(benchlingAddress)
    return
def fastCloningPrimersFile(vectorPlasmidAddress, insertPlasmidAddress, vectorSeq, insertSeq, maxTempDiff=MAX_TEMP_DIFF, destinationAddress='fastCloningPrimerInfo.csv', benchlingAddress='benchlingfastCloningPrimerInfo.csv', benchling=True, primerOptTm=PRIMER_OPT_TM, primerMinSize=PRIMER_MIN_SIZE, enzyme="phusion"):
    """File-based front end for fastCloningPrimers: parses the two plasmid
    files (fasta/genbank) and forwards their sequences plus all options.

    Args:
        vectorPlasmidAddress ([str]): address for vector plasmid
        insertPlasmidAddress ([str]): address for insert plasmid
        vectorSeq ([str]): vector sequence
        insertSeq ([str]): insert sequence
    """
    parsedVector, parsedInsert = fileParsing(
        vectorPlasmidAddress, insertPlasmidAddress)
    # fileParsing may return Seq-like objects; coerce to plain strings
    # before delegating, exactly as the sequence-based API expects.
    return fastCloningPrimers(
        str(parsedVector), str(parsedInsert), vectorSeq, insertSeq,
        maxTempDiff, destinationAddress, benchlingAddress, benchling,
        primerOptTm, primerMinSize, enzyme)
| 100.38674
| 7,342
| 0.852862
| 2,962
| 72,680
| 20.861242
| 0.206955
| 0.002767
| 0.00267
| 0.004272
| 0.167986
| 0.154747
| 0.149342
| 0.141509
| 0.139826
| 0.136654
| 0
| 0.008093
| 0.10066
| 72,680
| 723
| 7,343
| 100.525588
| 0.937244
| 0.075894
| 0
| 0.415385
| 0
| 0.003846
| 0.635265
| 0.610716
| 0
| 1
| 0
| 0.001383
| 0
| 1
| 0.034615
| false
| 0
| 0.021154
| 0
| 0.094231
| 0.061538
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.