hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
69eb0df38e04510307aabbf9eea284b81de21051
| 146
|
py
|
Python
|
julie/logic/tests.py
|
MarcelloBB/julieutils
|
cffba53a1561d05660c2274ce0a9485bf9e0ddcf
|
[
"MIT"
] | 2
|
2021-08-23T15:16:43.000Z
|
2021-11-01T15:29:02.000Z
|
julie/logic/tests.py
|
MarcelloBB/julieutils
|
cffba53a1561d05660c2274ce0a9485bf9e0ddcf
|
[
"MIT"
] | null | null | null |
julie/logic/tests.py
|
MarcelloBB/julieutils
|
cffba53a1561d05660c2274ce0a9485bf9e0ddcf
|
[
"MIT"
] | null | null | null |
import operators
def TEST_():
"""
[FUNC] TEST_:
Silly tests
"""
return operators.OR(True, False), operators.AND(True, False)
| 16.222222
| 64
| 0.609589
| 17
| 146
| 5.117647
| 0.705882
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246575
| 146
| 9
| 64
| 16.222222
| 0.790909
| 0.171233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
387d39744fa0d596892a938a890a05e357a42997
| 93
|
py
|
Python
|
app/views.py
|
sanjeevkumar12/flask-app-apispecs
|
c3ab260e2dd533f647224337fcbab6e8e22dba5b
|
[
"MIT"
] | null | null | null |
app/views.py
|
sanjeevkumar12/flask-app-apispecs
|
c3ab260e2dd533f647224337fcbab6e8e22dba5b
|
[
"MIT"
] | null | null | null |
app/views.py
|
sanjeevkumar12/flask-app-apispecs
|
c3ab260e2dd533f647224337fcbab6e8e22dba5b
|
[
"MIT"
] | null | null | null |
from flask import render_template
def home_page():
return render_template("home.html")
| 15.5
| 39
| 0.763441
| 13
| 93
| 5.230769
| 0.769231
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150538
| 93
| 5
| 40
| 18.6
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
3893de1d57570c7acb2718b553bb0e182b74ea78
| 4,080
|
py
|
Python
|
a10sdk/core/so/so_counters.py
|
deepfield/a10sdk-python
|
bfaa58099f51f085d5e91652d1d1a3fd5c529d5d
|
[
"Apache-2.0"
] | 16
|
2015-05-20T07:26:30.000Z
|
2021-01-23T11:56:57.000Z
|
a10sdk/core/so/so_counters.py
|
deepfield/a10sdk-python
|
bfaa58099f51f085d5e91652d1d1a3fd5c529d5d
|
[
"Apache-2.0"
] | 6
|
2015-03-24T22:07:11.000Z
|
2017-03-28T21:31:18.000Z
|
a10sdk/core/so/so_counters.py
|
deepfield/a10sdk-python
|
bfaa58099f51f085d5e91652d1d1a3fd5c529d5d
|
[
"Apache-2.0"
] | 23
|
2015-03-29T15:43:01.000Z
|
2021-06-02T17:12:01.000Z
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SamplingEnable(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param counters1: {"enum": ["all", "so_pkts_conn_in", "so_pkts_conn_redirect", "so_pkts_dropped", "so_pkts_errors", "so_pkts_in", "so_pkts_new_conn_in", "so_pkts_new_conn_redirect", "so_pkts_out", "so_pkts_redirect", "so_pkts_conn_sync_fail", "so_pkts_nat_reserve_fail", "so_pkts_nat_release_fail", "so_pkts_conn_l7_sync", "so_pkts_conn_l4_sync", "so_pkts_redirect_conn_aged_out"], "type": "string", "description": "'all': all; 'so_pkts_conn_in': Total packets processed for an established connection; 'so_pkts_conn_redirect': Total packets redirected for an established connection; 'so_pkts_dropped': Total packets dropped; 'so_pkts_errors': Total packet errors; 'so_pkts_in': Total packets in-coming; 'so_pkts_new_conn_in': Total packets processed for a new connection; 'so_pkts_new_conn_redirect': Total packets redirected for a new connection; 'so_pkts_out': Total packets sent out; 'so_pkts_redirect': Total packets redirected; 'so_pkts_conn_sync_fail': Total connection sync failures; 'so_pkts_nat_reserve_fail': Total NAT reserve failures; 'so_pkts_nat_release_fail': Total NAT release failures; 'so_pkts_conn_l7_sync': Total L7 connection syncs; 'so_pkts_conn_l4_sync': Total L4 connection syncs; 'so_pkts_redirect_conn_aged_out': Total redirect conns aged out; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "sampling-enable"
self.DeviceProxy = ""
self.counters1 = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class SoCounters(A10BaseClass):
""" :param sampling_enable: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "so_pkts_conn_in", "so_pkts_conn_redirect", "so_pkts_dropped", "so_pkts_errors", "so_pkts_in", "so_pkts_new_conn_in", "so_pkts_new_conn_redirect", "so_pkts_out", "so_pkts_redirect", "so_pkts_conn_sync_fail", "so_pkts_nat_reserve_fail", "so_pkts_nat_release_fail", "so_pkts_conn_l7_sync", "so_pkts_conn_l4_sync", "so_pkts_redirect_conn_aged_out"], "type": "string", "description": "'all': all; 'so_pkts_conn_in': Total packets processed for an established connection; 'so_pkts_conn_redirect': Total packets redirected for an established connection; 'so_pkts_dropped': Total packets dropped; 'so_pkts_errors': Total packet errors; 'so_pkts_in': Total packets in-coming; 'so_pkts_new_conn_in': Total packets processed for a new connection; 'so_pkts_new_conn_redirect': Total packets redirected for a new connection; 'so_pkts_out': Total packets sent out; 'so_pkts_redirect': Total packets redirected; 'so_pkts_conn_sync_fail': Total connection sync failures; 'so_pkts_nat_reserve_fail': Total NAT reserve failures; 'so_pkts_nat_release_fail': Total NAT release failures; 'so_pkts_conn_l7_sync': Total L7 connection syncs; 'so_pkts_conn_l4_sync': Total L4 connection syncs; 'so_pkts_redirect_conn_aged_out': Total redirect conns aged out; ", "format": "enum"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Show scaleout statistics.
Class so-counters supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/so-counters`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "so-counters"
self.a10_url="/axapi/v3/so-counters"
self.DeviceProxy = ""
self.sampling_enable = []
for keys, value in kwargs.items():
setattr(self,keys, value)
| 70.344828
| 1,451
| 0.738971
| 588
| 4,080
| 4.765306
| 0.181973
| 0.12848
| 0.071378
| 0.037116
| 0.79015
| 0.79015
| 0.79015
| 0.79015
| 0.766595
| 0.766595
| 0
| 0.009195
| 0.147059
| 4,080
| 57
| 1,452
| 71.578947
| 0.795977
| 0.812255
| 0
| 0.526316
| 0
| 0
| 0.066478
| 0.029703
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2a0daee13d87ce298996504e904d61508e61d26f
| 33
|
py
|
Python
|
tree_bark_synthesis/__init__.py
|
laitoch/tree-bark-synthesis
|
0bd43d6699d2e05f62d144f310874f986bbd91d2
|
[
"MIT"
] | null | null | null |
tree_bark_synthesis/__init__.py
|
laitoch/tree-bark-synthesis
|
0bd43d6699d2e05f62d144f310874f986bbd91d2
|
[
"MIT"
] | null | null | null |
tree_bark_synthesis/__init__.py
|
laitoch/tree-bark-synthesis
|
0bd43d6699d2e05f62d144f310874f986bbd91d2
|
[
"MIT"
] | null | null | null |
from generate_tree_bark import *
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2a17bd0ebabee198a58e7c9783bc6e9d39997369
| 14,266
|
py
|
Python
|
src/abaqus/Job/JobFromInputFile.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Job/JobFromInputFile.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Job/JobFromInputFile.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from .Job import Job
from .MessageArray import MessageArray
class JobFromInputFile(Job):
"""The JobFromInputFile object defines a Job object which analyzes a model contained in an
input file.
The JobFromInputFile object is derived from the Job object.
Attributes
----------
getMemoryFromAnalysis: Boolean
A Boolean specifying whether to retrieve the recommended memory settings from the last
datacheck or analysis run and use those values in subsequent submissions. The default
value is ON.
analysis: SymbolicConstant
A SymbolicConstant specifying whe:py:class:`~.the`r :py:class:`~.the` job will be analyzed by Abaqus/Standard or
Abaqus/Explicit. Possible values are STANDARD, EXPLICIT, and UNKNOWN.If :py:class:`~.the` object has
:py:class:`~.the` type JobFromInputFile, **analysis=UNKNOWN**.
status: SymbolicConstant
A SymbolicConstant specifying the status of the analysis. Possible values are SUBMITTED,
RUNNING, ABORTED, TERMINATED, COMPLETED, CHECK_RUNNING, and CHECK_COMPLETED.If the
**message** member is empty, **status** is set to NONE.
messages: MessageArray
A :py:class:`~abaqus.Job.MessageArray.MessageArray` object specifying the messages received during an analysis.
environment: tuple
A tuple of Strings specifying the environment variables and their values.
Notes
-----
This object can be accessed by:
.. code-block:: python
import job
mdb.jobs[name]
"""
# A Boolean specifying whether to retrieve the recommended memory settings from the last
# datacheck or analysis run and use those values in subsequent submissions. The default
# value is ON.
getMemoryFromAnalysis: Boolean = ON
# A SymbolicConstant specifying whether the job will be analyzed by Abaqus/Standard or
# Abaqus/Explicit. Possible values are STANDARD, EXPLICIT, and UNKNOWN.If the object has
# the type JobFromInputFile, *analysis*=UNKNOWN.
analysis: SymbolicConstant = None
# A SymbolicConstant specifying the status of the analysis. Possible values are SUBMITTED,
# RUNNING, ABORTED, TERMINATED, COMPLETED, CHECK_RUNNING, and CHECK_COMPLETED.If the
# *message* member is empty, *status* is set to NONE.
status: SymbolicConstant = None
# A MessageArray object specifying the messages received during an analysis.
messages: MessageArray = MessageArray()
# A tuple of Strings specifying the environment variables and their values.
environment: tuple = ()
def __init__(self, name: str, inputFileName: str, type: SymbolicConstant = ANALYSIS, queue: str = '',
waitHours: int = 0, waitMinutes: int = 0, atTime: str = '', scratch: str = '',
userSubroutine: str = '', numCpus: int = 1, memory: int = 90,
memoryUnits: SymbolicConstant = PERCENTAGE,
explicitPrecision: SymbolicConstant = SINGLE,
nodalOutputPrecision: SymbolicConstant = SINGLE,
parallelizationMethodExplicit: SymbolicConstant = DOMAIN, numDomains: int = 1,
activateLoadBalancing: Boolean = OFF, multiprocessingMode: SymbolicConstant = DEFAULT,
licenseType: SymbolicConstant = DEFAULT):
"""This method creates an analysis job using an input file for the model definition.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.JobFromInputFile
Parameters
----------
name
A String specifying the name of the new job. The name must be a valid Abaqus/CAE object
name.
inputFileName
A String specifying the input file to read. Possible values are any valid file name. If
the .inp extension is not included in the value of the argument, the system will append
it for the user.
type
A SymbolicConstant specifying the type of job. Possible values are ANALYSIS,
SYNTAXCHECK, and RECOVER. The default value is ANALYSIS.For theJobFromInputFile object,
*type*=RESTART is not currently supported.
queue
A String specifying the name of the queue to which to submit the job. The default value
is an empty string.Note: You can use the *queue* argument when creating a Job object on
a Windows workstation; however, remote queues are available only on Linux platforms.
waitHours
An Int specifying the number of hours to wait before submitting the job. This argument
is ignored if *queue* is set. The default value is 0.This argument works in conjunction
with *waitMinutes*. *waitHours* and *atTime* are mutually exclusive.
waitMinutes
An Int specifying the number of minutes to wait before submitting the job. This argument
is ignored if *queue* is set. The default value is 0.This argument works in conjunction
with *waitHours*. *waitMinutes* and *atTime* are mutually exclusive.
atTime
A String specifying the time at which to submit the job. If *queue* is empty, the string
syntax must be valid for the Linux `at` command. If *queue* is set, the syntax must be
valid according to the system administrator. The default value is an empty string.Note:
You can use the *atTime* argument when creating a Job object on a Windows workstation;
however, the `at` command is available only on Linux platforms.
scratch
A String specifying the location of the scratch directory. The default value is an empty
string.
userSubroutine
A String specifying the file containing the user's subroutine definitions. The default
value is an empty string.
numCpus
An Int specifying the number of CPUs to use for this analysis if parallel processing is
available. Possible values are *numCpus* >> 0. The default value is 1.
memory
An Int specifying the amount of memory available to Abaqus analysis. The value should be
expressed in the units supplied in *memoryUnits*. The default value is 90.
memoryUnits
A SymbolicConstant specifying the units for the amount of memory used in an Abaqus
analysis. Possible values are PERCENTAGE, MEGA_BYTES, and GIGA_BYTES. The default value
is PERCENTAGE.
explicitPrecision
A SymbolicConstant specifying whether to use the double precision version of
Abaqus/Explicit. Possible values are SINGLE, FORCE_SINGLE, DOUBLE,
DOUBLE_CONSTRAINT_ONLY, and DOUBLE_PLUS_PACK. The default value is SINGLE.
nodalOutputPrecision
A SymbolicConstant specifying the precision of the nodal output written to the output
database. Possible values are SINGLE and FULL. The default value is SINGLE.
parallelizationMethodExplicit
A SymbolicConstant specifying the parallelization method for Abaqus/Explicit. This value
is ignored for Abaqus/Standard. Possible values are DOMAIN and LOOP. The default value
is DOMAIN.
numDomains
An Int specifying the number of domains for parallel execution in Abaqus/Explicit. When
*parallelizationMethodExplicit*=DOMAIN, *numDomains* must be a multiple of *numCpus*.
The default value is 1.
activateLoadBalancing
A Boolean specifying whether to activate dyanmic load balancing for jobs running on
multiple processors with multiple domains in Abaqus/Explicit. The default value is OFF.
multiprocessingMode
A SymbolicConstant specifying whether an analysis is decomposed into threads or into
multiple processes that communicate through a message passing interface (MPI). Possible
values are DEFAULT, THREADS, and MPI. The default value is DEFAULT.
licenseType
A SymbolicConstant specifying the type of license type being used in the case of the
DSLS SimUnit license model. Possible values are DEFAULT, TOKEN, and CREDIT. The default
value is DEFAULT.If the license model is not the DSLS SimUnit, the licenseType is not
available.
Returns
-------
A JobFromInputFile object.
Raises
------
AbaqusException
ValueError
- If the user attempts to provide RESTART as a value to argument type:
ValueError: RESTART of input file job is not currently supported
"""
super().__init__()
pass
def setValues(self, type: SymbolicConstant = ANALYSIS, queue: str = '', waitHours: int = 0,
waitMinutes: int = 0, atTime: str = '', scratch: str = '', userSubroutine: str = '',
numCpus: int = 1, memory: int = 90, memoryUnits: SymbolicConstant = PERCENTAGE,
explicitPrecision: SymbolicConstant = SINGLE,
nodalOutputPrecision: SymbolicConstant = SINGLE,
parallelizationMethodExplicit: SymbolicConstant = DOMAIN, numDomains: int = 1,
activateLoadBalancing: Boolean = OFF, multiprocessingMode: SymbolicConstant = DEFAULT,
licenseType: SymbolicConstant = DEFAULT):
"""This method modifies the JobFromInputFile object.
Parameters
----------
type
A SymbolicConstant specifying the type of job. Possible values are ANALYSIS,
SYNTAXCHECK, and RECOVER. The default value is ANALYSIS.For theJobFromInputFile object,
*type*=RESTART is not currently supported.
queue
A String specifying the name of the queue to which to submit the job. The default value
is an empty string.Note: You can use the *queue* argument when creating a Job object on
a Windows workstation; however, remote queues are available only on Linux platforms.
waitHours
An Int specifying the number of hours to wait before submitting the job. This argument
is ignored if *queue* is set. The default value is 0.This argument works in conjunction
with *waitMinutes*. *waitHours* and *atTime* are mutually exclusive.
waitMinutes
An Int specifying the number of minutes to wait before submitting the job. This argument
is ignored if *queue* is set. The default value is 0.This argument works in conjunction
with *waitHours*. *waitMinutes* and *atTime* are mutually exclusive.
atTime
A String specifying the time at which to submit the job. If *queue* is empty, the string
syntax must be valid for the Linux `at` command. If *queue* is set, the syntax must be
valid according to the system administrator. The default value is an empty string.Note:
You can use the *atTime* argument when creating a Job object on a Windows workstation;
however, the `at` command is available only on Linux platforms.
scratch
A String specifying the location of the scratch directory. The default value is an empty
string.
userSubroutine
A String specifying the file containing the user's subroutine definitions. The default
value is an empty string.
numCpus
An Int specifying the number of CPUs to use for this analysis if parallel processing is
available. Possible values are *numCpus* >> 0. The default value is 1.
memory
An Int specifying the amount of memory available to Abaqus analysis. The value should be
expressed in the units supplied in *memoryUnits*. The default value is 90.
memoryUnits
A SymbolicConstant specifying the units for the amount of memory used in an Abaqus
analysis. Possible values are PERCENTAGE, MEGA_BYTES, and GIGA_BYTES. The default value
is PERCENTAGE.
explicitPrecision
A SymbolicConstant specifying whether to use the double precision version of
Abaqus/Explicit. Possible values are SINGLE, FORCE_SINGLE, DOUBLE,
DOUBLE_CONSTRAINT_ONLY, and DOUBLE_PLUS_PACK. The default value is SINGLE.
nodalOutputPrecision
A SymbolicConstant specifying the precision of the nodal output written to the output
database. Possible values are SINGLE and FULL. The default value is SINGLE.
parallelizationMethodExplicit
A SymbolicConstant specifying the parallelization method for Abaqus/Explicit. This value
is ignored for Abaqus/Standard. Possible values are DOMAIN and LOOP. The default value
is DOMAIN.
numDomains
An Int specifying the number of domains for parallel execution in Abaqus/Explicit. When
*parallelizationMethodExplicit*=DOMAIN, *numDomains* must be a multiple of *numCpus*.
The default value is 1.
activateLoadBalancing
A Boolean specifying whether to activate dyanmic load balancing for jobs running on
multiple processors with multiple domains in Abaqus/Explicit. The default value is OFF.
multiprocessingMode
A SymbolicConstant specifying whether an analysis is decomposed into threads or into
multiple processes that communicate through a message passing interface (MPI). Possible
values are DEFAULT, THREADS, and MPI. The default value is DEFAULT.
licenseType
A SymbolicConstant specifying the type of license type being used in the case of the
DSLS SimUnit license model. Possible values are DEFAULT, TOKEN, and CREDIT. The default
value is DEFAULT.If the license model is not the DSLS SimUnit, the licenseType is not
available.
"""
pass
| 57.293173
| 120
| 0.669704
| 1,701
| 14,266
| 5.601411
| 0.146972
| 0.027918
| 0.056675
| 0.064232
| 0.867233
| 0.859257
| 0.859257
| 0.849916
| 0.849916
| 0.836692
| 0
| 0.002551
| 0.285644
| 14,266
| 248
| 121
| 57.524194
| 0.932391
| 0.770363
| 0
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0.068966
| 0.103448
| 0
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2a2d1e759d24ed5f8134621f8feb6d1df44bde42
| 492
|
py
|
Python
|
DailyChallenge/LC_231.py
|
iphyer/LeetcodeSummary
|
ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9
|
[
"MIT"
] | null | null | null |
DailyChallenge/LC_231.py
|
iphyer/LeetcodeSummary
|
ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9
|
[
"MIT"
] | null | null | null |
DailyChallenge/LC_231.py
|
iphyer/LeetcodeSummary
|
ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9
|
[
"MIT"
] | null | null | null |
"""
n & (n - 1) == 0
"""
class Solution(object):
def isPowerOfTwo(self, n):
if n == 0:
return False
return n & (n - 1) == 0
"""
n&(-n) == n
"""
class Solution(object):
def isPowerOfTwo(self, n):
if n == 0:
return False
return n & (-n) == n
"""
log N
"""
class Solution(object):
def isPowerOfTwo(self, n):
if n == 0:
return False
while n % 2 == 0:
n /= 2
return n == 1
| 14.057143
| 31
| 0.426829
| 63
| 492
| 3.333333
| 0.238095
| 0.057143
| 0.271429
| 0.314286
| 0.857143
| 0.857143
| 0.857143
| 0.857143
| 0.857143
| 0.857143
| 0
| 0.037931
| 0.410569
| 492
| 34
| 32
| 14.470588
| 0.686207
| 0.03252
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
2a36f4a3abfaeb7421e57bae23c4d4c1c797108a
| 1,407
|
py
|
Python
|
test/input/test_nestedsuppression.py
|
peternewman/pychecker
|
725fcd43ec0fd324641b6a29a81155cf8a8698b7
|
[
"BSD-3-Clause"
] | 18
|
2015-07-21T12:29:58.000Z
|
2021-06-06T10:06:03.000Z
|
test/input/test_nestedsuppression.py
|
peternewman/pychecker
|
725fcd43ec0fd324641b6a29a81155cf8a8698b7
|
[
"BSD-3-Clause"
] | 1
|
2016-12-06T13:56:04.000Z
|
2016-12-06T13:56:04.000Z
|
test/input/test_nestedsuppression.py
|
peternewman/pychecker
|
725fcd43ec0fd324641b6a29a81155cf8a8698b7
|
[
"BSD-3-Clause"
] | 11
|
2015-06-17T08:40:22.000Z
|
2022-03-21T01:00:43.000Z
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
class O(object):
pass
def containerFirst():
def first():
__pychecker__ = 'no-objattrs'
a = O()
# this one should not trigger a warning since __pychecker__ hides it
print a.nonexistent
def second():
b = O()
# this one should trigger a warning
print b.nonexistent
first()
second()
def containerSecond():
def first():
a = O()
# this one should trigger a warning
print a.nonexistent
def second():
__pychecker__ = 'no-objattrs'
b = O()
# this one should not trigger a warning since __pychecker__ hides it
print b.nonexistent
first()
second()
def containerNeither():
def first():
a = O()
# this one should trigger a warning
print a.nonexistent
def second():
b = O()
# this one should trigger a warning
print b.nonexistent
first()
second()
def containerBoth():
def first():
__pychecker__ = 'no-objattrs'
a = O()
# this one should not trigger a warning since __pychecker__ hides it
print a.nonexistent
def second():
__pychecker__ = 'no-objattrs'
b = O()
# this one should not trigger a warning since __pychecker__ hides it
print b.nonexistent
first()
second()
| 21.318182
| 76
| 0.571429
| 167
| 1,407
| 4.622754
| 0.215569
| 0.051813
| 0.082902
| 0.145078
| 0.86658
| 0.86658
| 0.862694
| 0.862694
| 0.862694
| 0.862694
| 0
| 0.003219
| 0.337598
| 1,407
| 65
| 77
| 21.646154
| 0.825107
| 0.319119
| 0
| 0.857143
| 0
| 0
| 0.046463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.02381
| 0
| null | null | 0.190476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2a3c412f45c83d44be346081a5ecab511fd94cd1
| 93
|
py
|
Python
|
pants-plugins/grapl_setup_py/register.py
|
msilvey/grapl
|
142dc8068d7955e3e4d24221aa94c236745d5faa
|
[
"Apache-2.0"
] | 313
|
2018-10-15T05:58:39.000Z
|
2020-04-21T20:31:39.000Z
|
pants-plugins/grapl_setup_py/register.py
|
msilvey/grapl
|
142dc8068d7955e3e4d24221aa94c236745d5faa
|
[
"Apache-2.0"
] | 848
|
2020-04-26T19:23:37.000Z
|
2022-03-31T16:57:39.000Z
|
pants-plugins/grapl_setup_py/register.py
|
msilvey/grapl
|
142dc8068d7955e3e4d24221aa94c236745d5faa
|
[
"Apache-2.0"
] | 43
|
2020-04-27T20:59:18.000Z
|
2022-03-29T21:56:09.000Z
|
from grapl_setup_py import grapl_setupargs
def rules():
return grapl_setupargs.rules()
| 15.5
| 42
| 0.784946
| 13
| 93
| 5.307692
| 0.692308
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150538
| 93
| 5
| 43
| 18.6
| 0.873418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
2a4bde5f8df1a0029e18be4788278bfeb2d4699f
| 163
|
py
|
Python
|
tests/facettools/test_print2elog.py
|
joelfrederico/mytools
|
7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f
|
[
"MIT"
] | 1
|
2021-03-31T23:27:09.000Z
|
2021-03-31T23:27:09.000Z
|
tests/facettools/test_print2elog.py
|
joelfrederico/mytools
|
7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f
|
[
"MIT"
] | null | null | null |
tests/facettools/test_print2elog.py
|
joelfrederico/mytools
|
7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f
|
[
"MIT"
] | null | null | null |
import mytools.facettools as mtft
import datetime as dt
def test_print2elog():
mtft.print2elog(author='Joel Frederico', title='Test', text='This is a test')
| 23.285714
| 81
| 0.748466
| 24
| 163
| 5.041667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.141104
| 163
| 6
| 82
| 27.166667
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0.196319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
2a6763bd81acc5145f2c5c1d2dc3a57c980619e8
| 154
|
py
|
Python
|
lib/init.py
|
CDAT/changelogger
|
ac08b1afac63e26bfcf248b8526539644369cc19
|
[
"BSD-2-Clause"
] | null | null | null |
lib/init.py
|
CDAT/changelogger
|
ac08b1afac63e26bfcf248b8526539644369cc19
|
[
"BSD-2-Clause"
] | 1
|
2020-07-28T00:05:42.000Z
|
2020-07-28T00:05:42.000Z
|
lib/init.py
|
CDAT/changelogger
|
ac08b1afac63e26bfcf248b8526539644369cc19
|
[
"BSD-2-Clause"
] | null | null | null |
from . import gh
import os
import sys
if sys.version_info < (2,7,9):
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
| 17.111111
| 48
| 0.779221
| 24
| 154
| 4.875
| 0.666667
| 0.239316
| 0.393162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0.123377
| 154
| 8
| 49
| 19.25
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aac14cd858e0d2984444936f0bef4552974b3122
| 2,692
|
py
|
Python
|
dataactcore/migrations/versions/3f24399ddd1b_add_high_comp_officer_fields_to_.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
dataactcore/migrations/versions/3f24399ddd1b_add_high_comp_officer_fields_to_.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 3
|
2021-08-22T11:47:45.000Z
|
2022-03-29T22:06:49.000Z
|
dataactcore/migrations/versions/3f24399ddd1b_add_high_comp_officer_fields_to_.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2020-07-17T23:50:56.000Z
|
2020-07-17T23:50:56.000Z
|
"""Add high comp officer fields to detached_award_procurement table
Revision ID: 3f24399ddd1b
Revises: ad3dd1c0cf20
Create Date: 2019-05-24 09:31:12.678128
"""
# revision identifiers, used by Alembic.
revision = '3f24399ddd1b'
down_revision = 'ad3dd1c0cf20'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('detached_award_procurement', sa.Column('high_comp_officer1_amount', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer1_full_na', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer2_amount', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer2_full_na', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer3_amount', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer3_full_na', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer4_amount', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer4_full_na', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer5_amount', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('high_comp_officer5_full_na', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('detached_award_procurement', 'high_comp_officer5_full_na')
op.drop_column('detached_award_procurement', 'high_comp_officer5_amount')
op.drop_column('detached_award_procurement', 'high_comp_officer4_full_na')
op.drop_column('detached_award_procurement', 'high_comp_officer4_amount')
op.drop_column('detached_award_procurement', 'high_comp_officer3_full_na')
op.drop_column('detached_award_procurement', 'high_comp_officer3_amount')
op.drop_column('detached_award_procurement', 'high_comp_officer2_full_na')
op.drop_column('detached_award_procurement', 'high_comp_officer2_amount')
op.drop_column('detached_award_procurement', 'high_comp_officer1_full_na')
op.drop_column('detached_award_procurement', 'high_comp_officer1_amount')
# ### end Alembic commands ###
| 45.627119
| 114
| 0.773031
| 364
| 2,692
| 5.318681
| 0.186813
| 0.086777
| 0.260331
| 0.309917
| 0.778926
| 0.778926
| 0.766529
| 0.766529
| 0.766529
| 0.600207
| 0
| 0.026446
| 0.10104
| 2,692
| 58
| 115
| 46.413793
| 0.773554
| 0.128529
| 0
| 0
| 0
| 0
| 0.466205
| 0.446274
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2ac81758f4aea43ca240e483956271d86c2211ae
| 11,065
|
py
|
Python
|
src/pyensae/languages/SimpleWorkflowListener.py
|
sdpython/pyensae
|
ada4dbb0b9901bf481eff2ea239e74ed964d93b0
|
[
"MIT"
] | 28
|
2015-07-19T21:20:51.000Z
|
2022-02-16T11:50:53.000Z
|
src/pyensae/languages/SimpleWorkflowListener.py
|
sdpython/pyensae
|
ada4dbb0b9901bf481eff2ea239e74ed964d93b0
|
[
"MIT"
] | 34
|
2015-06-16T15:38:25.000Z
|
2021-12-29T11:04:01.000Z
|
src/pyensae/languages/SimpleWorkflowListener.py
|
sdpython/pyensae
|
ada4dbb0b9901bf481eff2ea239e74ed964d93b0
|
[
"MIT"
] | 27
|
2015-01-13T08:24:22.000Z
|
2022-03-31T14:51:23.000Z
|
# Generated from \SimpleWorkflow.g4 by ANTLR 4.9
from antlr4 import *
if __name__ is not None and "." in __name__:
from .SimpleWorkflowParser import SimpleWorkflowParser
else:
from SimpleWorkflowParser import SimpleWorkflowParser
# This class defines a complete listener for a parse tree produced by SimpleWorkflowParser.
class SimpleWorkflowListener(ParseTreeListener):
# Enter a parse tree produced by SimpleWorkflowParser#parse.
def enterParse(self, ctx: SimpleWorkflowParser.ParseContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#parse.
def exitParse(self, ctx: SimpleWorkflowParser.ParseContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#final_stmt.
def enterFinal_stmt(self, ctx: SimpleWorkflowParser.Final_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#final_stmt.
def exitFinal_stmt(self, ctx: SimpleWorkflowParser.Final_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#affectation_stmt_comma.
def enterAffectation_stmt_comma(self, ctx: SimpleWorkflowParser.Affectation_stmt_commaContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#affectation_stmt_comma.
def exitAffectation_stmt_comma(self, ctx: SimpleWorkflowParser.Affectation_stmt_commaContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#affectation_stmt.
def enterAffectation_stmt(self, ctx: SimpleWorkflowParser.Affectation_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#affectation_stmt.
def exitAffectation_stmt(self, ctx: SimpleWorkflowParser.Affectation_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#for_stmt.
def enterFor_stmt(self, ctx: SimpleWorkflowParser.For_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#for_stmt.
def exitFor_stmt(self, ctx: SimpleWorkflowParser.For_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#if_stmt.
def enterIf_stmt(self, ctx: SimpleWorkflowParser.If_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#if_stmt.
def exitIf_stmt(self, ctx: SimpleWorkflowParser.If_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#evaluation_function.
def enterEvaluation_function(self, ctx: SimpleWorkflowParser.Evaluation_functionContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#evaluation_function.
def exitEvaluation_function(self, ctx: SimpleWorkflowParser.Evaluation_functionContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#expression.
def enterExpression(self, ctx: SimpleWorkflowParser.ExpressionContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#expression.
def exitExpression(self, ctx: SimpleWorkflowParser.ExpressionContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#expression_no_binary.
def enterExpression_no_binary(self, ctx: SimpleWorkflowParser.Expression_no_binaryContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#expression_no_binary.
def exitExpression_no_binary(self, ctx: SimpleWorkflowParser.Expression_no_binaryContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#function_call.
def enterFunction_call(self, ctx: SimpleWorkflowParser.Function_callContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#function_call.
def exitFunction_call(self, ctx: SimpleWorkflowParser.Function_callContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#variable_name.
def enterVariable_name(self, ctx: SimpleWorkflowParser.Variable_nameContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#variable_name.
def exitVariable_name(self, ctx: SimpleWorkflowParser.Variable_nameContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#binary_operator.
def enterBinary_operator(self, ctx: SimpleWorkflowParser.Binary_operatorContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#binary_operator.
def exitBinary_operator(self, ctx: SimpleWorkflowParser.Binary_operatorContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#unary_operator.
def enterUnary_operator(self, ctx: SimpleWorkflowParser.Unary_operatorContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#unary_operator.
def exitUnary_operator(self, ctx: SimpleWorkflowParser.Unary_operatorContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#stmt_comma.
def enterStmt_comma(self, ctx: SimpleWorkflowParser.Stmt_commaContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#stmt_comma.
def exitStmt_comma(self, ctx: SimpleWorkflowParser.Stmt_commaContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#stmt.
def enterStmt(self, ctx: SimpleWorkflowParser.StmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#stmt.
def exitStmt(self, ctx: SimpleWorkflowParser.StmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#connect_stmt.
def enterConnect_stmt(self, ctx: SimpleWorkflowParser.Connect_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#connect_stmt.
def exitConnect_stmt(self, ctx: SimpleWorkflowParser.Connect_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#data_or_module_output.
def enterData_or_module_output(self, ctx: SimpleWorkflowParser.Data_or_module_outputContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#data_or_module_output.
def exitData_or_module_output(self, ctx: SimpleWorkflowParser.Data_or_module_outputContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#module_input.
def enterModule_input(self, ctx: SimpleWorkflowParser.Module_inputContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#module_input.
def exitModule_input(self, ctx: SimpleWorkflowParser.Module_inputContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#data_stmt.
def enterData_stmt(self, ctx: SimpleWorkflowParser.Data_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#data_stmt.
def exitData_stmt(self, ctx: SimpleWorkflowParser.Data_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#module_stmt.
def enterModule_stmt(self, ctx: SimpleWorkflowParser.Module_stmtContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#module_stmt.
def exitModule_stmt(self, ctx: SimpleWorkflowParser.Module_stmtContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#module_call.
def enterModule_call(self, ctx: SimpleWorkflowParser.Module_callContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#module_call.
def exitModule_call(self, ctx: SimpleWorkflowParser.Module_callContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#element_name.
def enterElement_name(self, ctx: SimpleWorkflowParser.Element_nameContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#element_name.
def exitElement_name(self, ctx: SimpleWorkflowParser.Element_nameContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#list_param_affectation.
def enterList_param_affectation(self, ctx: SimpleWorkflowParser.List_param_affectationContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#list_param_affectation.
def exitList_param_affectation(self, ctx: SimpleWorkflowParser.List_param_affectationContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#param_affectation.
def enterParam_affectation(self, ctx: SimpleWorkflowParser.Param_affectationContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#param_affectation.
def exitParam_affectation(self, ctx: SimpleWorkflowParser.Param_affectationContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#param_name.
def enterParam_name(self, ctx: SimpleWorkflowParser.Param_nameContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#param_name.
def exitParam_name(self, ctx: SimpleWorkflowParser.Param_nameContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#inout_name.
def enterInout_name(self, ctx: SimpleWorkflowParser.Inout_nameContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#inout_name.
def exitInout_name(self, ctx: SimpleWorkflowParser.Inout_nameContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#module_name.
def enterModule_name(self, ctx: SimpleWorkflowParser.Module_nameContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#module_name.
def exitModule_name(self, ctx: SimpleWorkflowParser.Module_nameContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#data_name.
def enterData_name(self, ctx: SimpleWorkflowParser.Data_nameContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#data_name.
def exitData_name(self, ctx: SimpleWorkflowParser.Data_nameContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#constant.
def enterConstant(self, ctx: SimpleWorkflowParser.ConstantContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#constant.
def exitConstant(self, ctx: SimpleWorkflowParser.ConstantContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#string_literal.
def enterString_literal(self, ctx: SimpleWorkflowParser.String_literalContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#string_literal.
def exitString_literal(self, ctx: SimpleWorkflowParser.String_literalContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#integer_number.
def enterInteger_number(self, ctx: SimpleWorkflowParser.Integer_numberContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#integer_number.
def exitInteger_number(self, ctx: SimpleWorkflowParser.Integer_numberContext):
pass
# Enter a parse tree produced by SimpleWorkflowParser#real_number.
def enterReal_number(self, ctx: SimpleWorkflowParser.Real_numberContext):
pass
# Exit a parse tree produced by SimpleWorkflowParser#real_number.
def exitReal_number(self, ctx: SimpleWorkflowParser.Real_numberContext):
pass
del SimpleWorkflowParser
| 40.830258
| 99
| 0.767555
| 1,204
| 11,065
| 6.88289
| 0.106312
| 0.047062
| 0.078436
| 0.141185
| 0.883553
| 0.846869
| 0.842042
| 0.609992
| 0.580669
| 0.160372
| 0
| 0.00044
| 0.178943
| 11,065
| 270
| 100
| 40.981481
| 0.911723
| 0.388251
| 0
| 0.474074
| 1
| 0
| 0.000151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.474074
| false
| 0.474074
| 0.022222
| 0
| 0.503704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
630b6da74dd3249e14036913cb96b73984f51600
| 7,538
|
py
|
Python
|
ss7/game-soloban-2.py
|
DuongVu39/C4E10_Duong
|
60ec59bddbb3397b5a1804930d5bdfd81107dcae
|
[
"MIT"
] | null | null | null |
ss7/game-soloban-2.py
|
DuongVu39/C4E10_Duong
|
60ec59bddbb3397b5a1804930d5bdfd81107dcae
|
[
"MIT"
] | null | null | null |
ss7/game-soloban-2.py
|
DuongVu39/C4E10_Duong
|
60ec59bddbb3397b5a1804930d5bdfd81107dcae
|
[
"MIT"
] | null | null | null |
<<<<<<< HEAD
#pusher
#map
#box
#destination
import time
#set pusher coordinate
#rep: P
pusher ={
"x":1,
"y":0
}
#set box coordinate
#rep: B
boxes = [
{
"x": 3,
"y": 2
},
{
"x": 1,
"y": 3
}]
#set destination coordinate
#rep: D
gates = [
{
"x": 3,
"y": 3
},
{
"x": 0,
"y": 0
},
]
#set map size
size = {
"x": 6,
"y": 7
}
#level saved
saved_pusher = pusher.copy()
saved_boxes = [box.copy() for box in boxes]
def reset_level(saved_pusher,saved_boxes):
global pusher, boxes
pusher = saved_pusher
boxes = saved_boxes
def in_map(x, y, size):
return 0 <= x < size["x"] and 0<= y < size["y"]
def check_overlap (x, y, items):
for item in items:
if x == item["x"] and y == item["y"]:
return True
return False
def map(pusher, boxes, gates):
for i in range (size["y"]):
for j in range (size["x"]):
if i == pusher["y"] and j == pusher["x"]:
print (" P ", end = '')
elif check_overlap(j, i, boxes):
print (" B ", end = '')
elif check_overlap(j, i, gates):
print (" D ", end = '')
else:
print (" - ", end = '')
print()
def check_box(pusher,dx,dy,items):
for item in items:
if item["x"] == pusher["x"] + dx and item["y"] == pusher["y"] + dy:
return item
return None
def move(item, dx, dy):
item["x"] += dx
item["y"] += dy
return item
def check_win(boxes, gates):
count = 0
for box in boxes:
if check_overlap(box["x"],box["y"], gates):
count += 1
if count == len(boxes):
return True
else:
return False
map(pusher, boxes, gates)
def input_process(direction):
dx = 0
dy = 0
if direction == "W":
dy -=1
elif direction == "A":
dx -=1
elif direction == "S":
dy +=1
elif direction == "D":
dx +=1
else:
print("Wrong button,bro!")
time.sleep(0.5)
return dx, dy
undo_pusher = 0
undo_boxes = 0
# main GAME_LOOP
while True:
command = input("What's your next move? W/A/S/D/U \n Enter R to reset the GAME").upper()
if command == "R":
reset_level(saved_pusher,saved_boxes)
map(pusher, boxes, gates)
continue
if command == "U":
if undo_pusher != 0:
reset_level(undo_pusher, undo_boxes)
map(pusher, boxes, gates)
continue
else:
print("There's nothing to undo, bro!")
time.sleep(0.5)
map(pusher, boxes, gates)
continue
# luu du lieu truoc khi di chuyen:
undo_pusher = pusher.copy()
undo_boxes = [box.copy() for box in boxes]
# xu ly dau vao
dx, dy = input_process(command)
box_ = check_box(pusher,dx,dy,boxes)
if box_ is not None:
if check_overlap (box_["x"]+dx, box_["y"]+dy, boxes):
print("You can't go there, bro!")
else:
if in_map(box_["x"]+dx,box_["y"]+dy,size):
box_ = move(box_, dx, dy)
pusher = move(pusher, dx, dy)
else:
print ("Box will fall out bro")
time.sleep(0.5)
elif in_map(pusher["x"] + dx, pusher["y"] + dy, size):
pusher = move(pusher, dx, dy)
else:
print ("You can't go there, bro")
time.sleep(.5)
map(pusher, boxes, gates)
if check_win(boxes, gates):
print("Win")
break
print ("You won!")
=======
#pusher
#map
#box
#destination
import time
#set pusher coordinate
#rep: P
pusher ={
"x":1,
"y":0
}
#set box coordinate
#rep: B
boxes = [
{
"x": 3,
"y": 2
},
{
"x": 1,
"y": 3
}]
#set destination coordinate
#rep: D
gates = [
{
"x": 3,
"y": 3
},
{
"x": 0,
"y": 0
},
]
#set map size
size = {
"x": 6,
"y": 7
}
#level saved
saved_pusher = pusher.copy()
saved_boxes = [box.copy() for box in boxes]
def reset_level(saved_pusher,saved_boxes):
global pusher, boxes
pusher = saved_pusher
boxes = saved_boxes
def in_map(x, y, size):
return 0 <= x < size["x"] and 0<= y < size["y"]
def check_overlap (x, y, items):
for item in items:
if x == item["x"] and y == item["y"]:
return True
return False
def map(pusher, boxes, gates):
for i in range (size["y"]):
for j in range (size["x"]):
if i == pusher["y"] and j == pusher["x"]:
print (" P ", end = '')
elif check_overlap(j, i, boxes):
print (" B ", end = '')
elif check_overlap(j, i, gates):
print (" D ", end = '')
else:
print (" - ", end = '')
print()
def check_box(pusher,dx,dy,items):
for item in items:
if item["x"] == pusher["x"] + dx and item["y"] == pusher["y"] + dy:
return item
return None
def move(item, dx, dy):
item["x"] += dx
item["y"] += dy
return item
def check_win(boxes, gates):
count = 0
for box in boxes:
if check_overlap(box["x"],box["y"], gates):
count += 1
if count == len(boxes):
return True
else:
return False
map(pusher, boxes, gates)
def input_process(direction):
dx = 0
dy = 0
if direction == "W":
dy -=1
elif direction == "A":
dx -=1
elif direction == "S":
dy +=1
elif direction == "D":
dx +=1
else:
print("Wrong button,bro!")
time.sleep(0.5)
return dx, dy
undo_pusher = 0
undo_boxes = 0
# main GAME_LOOP
while True:
command = input("What's your next move? W/A/S/D/U \n Enter R to reset the GAME").upper()
if command == "R":
reset_level(saved_pusher,saved_boxes)
map(pusher, boxes, gates)
continue
if command == "U":
if undo_pusher != 0:
reset_level(undo_pusher, undo_boxes)
map(pusher, boxes, gates)
continue
else:
print("There's nothing to undo, bro!")
time.sleep(0.5)
map(pusher, boxes, gates)
continue
# luu du lieu truoc khi di chuyen:
undo_pusher = pusher.copy()
undo_boxes = [box.copy() for box in boxes]
# xu ly dau vao
dx, dy = input_process(command)
box_ = check_box(pusher,dx,dy,boxes)
if box_ is not None:
if check_overlap (box_["x"]+dx, box_["y"]+dy, boxes):
print("You can't go there, bro!")
else:
if in_map(box_["x"]+dx,box_["y"]+dy,size):
box_ = move(box_, dx, dy)
pusher = move(pusher, dx, dy)
else:
print ("Box will fall out bro")
time.sleep(0.5)
elif in_map(pusher["x"] + dx, pusher["y"] + dy, size):
pusher = move(pusher, dx, dy)
else:
print ("You can't go there, bro")
time.sleep(.5)
map(pusher, boxes, gates)
if check_win(boxes, gates):
print("Win")
break
print ("You won!")
>>>>>>> 687005e51286e9522a42a2d33dcef452fb0a05b2
| 21.66092
| 92
| 0.480499
| 990
| 7,538
| 3.575758
| 0.109091
| 0.049718
| 0.047458
| 0.064407
| 0.987571
| 0.987571
| 0.987571
| 0.987571
| 0.987571
| 0.987571
| 0
| 0.019374
| 0.37689
| 7,538
| 347
| 93
| 21.723343
| 0.734298
| 0.050677
| 0
| 0.911877
| 0
| 0.007663
| 0.066798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007663
| null | null | 0.091954
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
632e62984fcd1b96bb77eb1a3df08e72bb961d83
| 182
|
py
|
Python
|
tests/refresh_token/mutations.py
|
kozickikarol/strawberry-django-jwt
|
9e99a2b61db87a9ec0466cbeefd694a65b641612
|
[
"MIT"
] | 17
|
2021-06-20T21:46:18.000Z
|
2022-02-16T07:47:40.000Z
|
tests/refresh_token/mutations.py
|
kozickikarol/strawberry-django-jwt
|
9e99a2b61db87a9ec0466cbeefd694a65b641612
|
[
"MIT"
] | 249
|
2021-06-21T17:43:00.000Z
|
2022-03-29T05:20:07.000Z
|
tests/refresh_token/mutations.py
|
kozickikarol/strawberry-django-jwt
|
9e99a2b61db87a9ec0466cbeefd694a65b641612
|
[
"MIT"
] | 5
|
2021-06-24T18:30:00.000Z
|
2022-03-17T17:23:00.000Z
|
from strawberry_django_jwt.mixins import JSONWebTokenMixin
from strawberry_django_jwt.mixins import RefreshTokenMixin
class Refresh(RefreshTokenMixin, JSONWebTokenMixin):
pass
| 26
| 58
| 0.868132
| 19
| 182
| 8.105263
| 0.578947
| 0.181818
| 0.25974
| 0.298701
| 0.454545
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098901
| 182
| 6
| 59
| 30.333333
| 0.939024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
2d52fd42a8ff39cb6fc126b9181fef49a98c8561
| 38,459
|
py
|
Python
|
fsmpy/tests/similarities/test_deng_jiang_fu.py
|
GCidd/fsmpy
|
131e81925481b3fe608f2c1945bbb00a8b674e72
|
[
"BSD-3-Clause"
] | 1
|
2022-01-31T07:01:59.000Z
|
2022-01-31T07:01:59.000Z
|
fsmpy/tests/similarities/test_deng_jiang_fu.py
|
GCidd/fsmpy
|
131e81925481b3fe608f2c1945bbb00a8b674e72
|
[
"BSD-3-Clause"
] | null | null | null |
fsmpy/tests/similarities/test_deng_jiang_fu.py
|
GCidd/fsmpy
|
131e81925481b3fe608f2c1945bbb00a8b674e72
|
[
"BSD-3-Clause"
] | null | null | null |
from numpy.testing import assert_almost_equal
from fsmpy.sets import IntuitionisticFuzzySet
from fsmpy.datasets import load_patients_diagnoses
from fsmpy.similarities import deng_jiang_fu
from fsmpy import DENG_JIANG_FU_MONOTONIC_TYPE_1_1, DENG_JIANG_FU_MONOTONIC_TYPE_1_2, \
DENG_JIANG_FU_MONOTONIC_TYPE_1_3, DENG_JIANG_FU_MONOTONIC_TYPE_1_4, DENG_JIANG_FU_MONOTONIC_TYPE_2_1, \
DENG_JIANG_FU_MONOTONIC_TYPE_2_2, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, DENG_JIANG_FU_MONOTONIC_TYPE_2_4, \
DENG_JIANG_FU_MONOTONIC_TYPE_3_1, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, DENG_JIANG_FU_MONOTONIC_TYPE_3_3
def test_deng_jiang_fu_1_1():
A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
A2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
A3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
B = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
# Example 2
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1), 0.489, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1), 0.458, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1), 0.546, decimal=3)
# Example 3
diagnoses, patients = load_patients_diagnoses()
viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
al, bob, joe, ted = patients
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.467,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.517,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.544,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.216,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.26,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.348,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.3, decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.415,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.641,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.371,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.363,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.344,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.498,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.32,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.277,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.198,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.264,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.318,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.421,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.407,
decimal=3)
def test_deng_jiang_fu_1_2():
A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
A2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
A3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
B = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2), 0.454, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2), 0.444, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2), 0.541, decimal=3)
# Example 3
diagnoses, patients = load_patients_diagnoses()
viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
al, bob, joe, ted = patients
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.437,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.489,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.474,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.186,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.184,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.28,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.21,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.366,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.635,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.309,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.348,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.308,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.241,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.214,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.47,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.189,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.243,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.31,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.401,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.403,
decimal=3)
def test_deng_jiang_fu_1_3():
    """Deng-Jiang-Fu monotonic similarity, type 1-3 with p=1, against reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.625), (a2, 0.615), (a3, 0.702)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1),
                            expected, decimal=3)
    # Example 3: patients-vs-diagnoses data set (order kept as in the reference).
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.608), (al, malaria, 0.657), (al, typhoid, 0.643),
        (al, stomach_problem, 0.313), (al, chest_problem, 0.311),
        (bob, viral_fever, 0.437), (bob, malaria, 0.348), (bob, typhoid, 0.536),
        (bob, stomach_problem, 0.777), (joe, viral_fever, 0.516),
        (bob, chest_problem, 0.472), (joe, malaria, 0.471), (joe, typhoid, 0.639),
        (joe, stomach_problem, 0.388), (joe, chest_problem, 0.353),
        (ted, viral_fever, 0.574), (ted, malaria, 0.572), (ted, typhoid, 0.474),
        (ted, stomach_problem, 0.391), (ted, chest_problem, 0.319),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1),
            expected, decimal=3)
def test_deng_jiang_fu_2_1():
    """Deng-Jiang-Fu monotonic similarity, type 2-1, against reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.681), (a2, 0.668), (a3, 0.745)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_2_1),
                            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.698), (al, malaria, 0.709), (al, typhoid, 0.698),
        (al, stomach_problem, 0.393), (al, chest_problem, 0.375),
        (bob, viral_fever, 0.518), (bob, malaria, 0.419), (bob, typhoid, 0.594),
        (bob, stomach_problem, 0.826), (bob, chest_problem, 0.509),
        (joe, viral_fever, 0.618), (joe, malaria, 0.533), (joe, typhoid, 0.712),
        (joe, stomach_problem, 0.512), (joe, chest_problem, 0.449),
        (ted, viral_fever, 0.672), (ted, malaria, 0.624), (ted, typhoid, 0.541),
        (ted, stomach_problem, 0.481), (ted, chest_problem, 0.376),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1),
            expected, decimal=3)
def test_deng_jiang_fu_2_2():
    """Deng-Jiang-Fu monotonic similarity, type 2-2, against reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.658), (a2, 0.658), (a3, 0.743)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_2_2),
                            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.683), (al, malaria, 0.69), (al, typhoid, 0.661),
        (al, stomach_problem, 0.361), (al, chest_problem, 0.324),
        (bob, viral_fever, 0.476), (bob, malaria, 0.352), (bob, typhoid, 0.567),
        (bob, stomach_problem, 0.825), (bob, chest_problem, 0.463),
        (joe, viral_fever, 0.603), (joe, malaria, 0.492), (joe, typhoid, 0.7),
        (joe, stomach_problem, 0.452), (joe, chest_problem, 0.387),
        (ted, viral_fever, 0.672), (ted, malaria, 0.61), (ted, typhoid, 0.532),
        (ted, stomach_problem, 0.464), (ted, chest_problem, 0.366),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2),
            expected, decimal=3)
def test_deng_jiang_fu_2_3():
    """Deng-Jiang-Fu monotonic similarity, type 2-3 with p=1, against reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.783), (a2, 0.783), (a3, 0.850)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1),
                            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.81), (al, malaria, 0.82), (al, typhoid, 0.8),
        (al, stomach_problem, 0.54), (al, chest_problem, 0.5),
        (bob, viral_fever, 0.67), (bob, malaria, 0.54), (bob, typhoid, 0.74),
        (bob, stomach_problem, 0.9), (bob, chest_problem, 0.64),
        (joe, viral_fever, 0.75), (joe, malaria, 0.68), (joe, typhoid, 0.82),
        (joe, stomach_problem, 0.6), (joe, chest_problem, 0.54),
        (ted, viral_fever, 0.8), (ted, malaria, 0.77), (ted, typhoid, 0.71),
        (ted, stomach_problem, 0.63), (ted, chest_problem, 0.55),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1),
            expected, decimal=3)
def test_deng_jiang_fu_2_4():
    """Deng-Jiang-Fu monotonic similarity, type 2-4, against reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.644), (a2, 0.644), (a3, 0.739)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_2_4),
                            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.681), (al, malaria, 0.695), (al, typhoid, 0.667),
        (al, stomach_problem, 0.37), (al, chest_problem, 0.333),
        (bob, viral_fever, 0.504), (bob, malaria, 0.37), (bob, typhoid, 0.587),
        (bob, stomach_problem, 0.818), (bob, chest_problem, 0.471),
        (joe, viral_fever, 0.6), (joe, malaria, 0.515), (joe, typhoid, 0.695),
        (joe, stomach_problem, 0.429), (joe, chest_problem, 0.37),
        (ted, viral_fever, 0.667), (ted, malaria, 0.626), (ted, typhoid, 0.55),
        (ted, stomach_problem, 0.46), (ted, chest_problem, 0.379),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4),
            expected, decimal=3)
def test_deng_jiang_fu_3_1():
    """Deng-Jiang-Fu monotonic similarity, type 3-1 with p=1, against reference values.

    NOTE(review): every assertion below was annotated "# fails" in the original
    source — the implementation reportedly does not reproduce these reference
    values. The expected numbers are kept unchanged.
    """
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.593), (a2, 0.593), (a3, 0.700)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1),
                            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.634), (al, malaria, 0.65), (al, typhoid, 0.619),
        (al, stomach_problem, 0.304), (al, chest_problem, 0.269),
        (bob, viral_fever, 0.441), (bob, malaria, 0.304), (bob, typhoid, 0.531),
        (bob, stomach_problem, 0.79), (bob, chest_problem, 0.406),
        (joe, viral_fever, 0.545), (joe, malaria, 0.453), (joe, typhoid, 0.65),
        (joe, stomach_problem, 0.363), (joe, chest_problem, 0.304),
        (ted, viral_fever, 0.619), (ted, malaria, 0.574), (ted, typhoid, 0.491),
        (ted, stomach_problem, 0.395), (ted, chest_problem, 0.314),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1),
            expected, decimal=3)
def test_deng_jiang_fu_3_2():
    """Deng-Jiang-Fu monotonic similarity, type 3-2 with p=2, u=v=0.5, vs. reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.928), (a2, 0.941), (a3, 0.975)):
        assert_almost_equal(
            deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5),
            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.947), (al, malaria, 0.946), (al, typhoid, 0.92),
        (al, stomach_problem, 0.736), (al, chest_problem, 0.678),
        (bob, viral_fever, 0.831), (bob, malaria, 0.694), (bob, typhoid, 0.898),
        (bob, stomach_problem, 0.986), (bob, chest_problem, 0.802),
        (joe, viral_fever, 0.915), (joe, malaria, 0.844), (joe, typhoid, 0.944),
        (joe, stomach_problem, 0.762), (joe, chest_problem, 0.7),
        (ted, viral_fever, 0.954), (ted, malaria, 0.927), (ted, typhoid, 0.897),
        (ted, stomach_problem, 0.829), (ted, chest_problem, 0.773),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2,
                          p=2, u=0.5, v=0.5),
            expected, decimal=3)
def test_deng_jiang_fu_3_3():
    """Deng-Jiang-Fu monotonic similarity, type 3-3 with p=1, against reference values."""
    a1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    a2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    a3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    b = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for lhs, expected in ((a1, 0.667), (a2, 0.667), (a3, 0.766)):
        assert_almost_equal(deng_jiang_fu(lhs, b, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1),
                            expected, decimal=3)
    # Patients-vs-diagnoses data set.
    diagnoses, patients = load_patients_diagnoses()
    viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
    al, bob, joe, ted = patients
    cases = [
        (al, viral_fever, 0.706), (al, malaria, 0.721), (al, typhoid, 0.691),
        (al, stomach_problem, 0.339), (al, chest_problem, 0.293),
        (bob, viral_fever, 0.508), (bob, malaria, 0.34), (bob, typhoid, 0.605),
        (bob, stomach_problem, 0.844), (bob, chest_problem, 0.464),
        (joe, viral_fever, 0.617), (joe, malaria, 0.52), (joe, typhoid, 0.721),
        (joe, stomach_problem, 0.415), (joe, chest_problem, 0.34),
        (ted, viral_fever, 0.691), (ted, malaria, 0.648), (ted, typhoid, 0.561),
        (ted, stomach_problem, 0.451), (ted, chest_problem, 0.351),
    ]
    for patient, diagnosis, expected in cases:
        assert_almost_equal(
            deng_jiang_fu(patient, diagnosis,
                          similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1),
            expected, decimal=3)
| 67.709507
| 141
| 0.705349
| 6,215
| 38,459
| 3.915527
| 0.038294
| 0.178262
| 0.217875
| 0.198069
| 0.972427
| 0.971933
| 0.969838
| 0.958373
| 0.949497
| 0.948675
| 0
| 0.07576
| 0.193453
| 38,459
| 567
| 142
| 67.828924
| 0.708759
| 0.004602
| 0
| 0.495127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.450292
| 1
| 0.019493
| false
| 0
| 0.009747
| 0
| 0.02924
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
93597dbc14ca397692c49227fc452e9e651f3cf3
| 147
|
py
|
Python
|
diazotheme/bootstrap/browser/interfaces.py
|
CMcStone/diazotheme.bootstrap
|
156b0f802400e99ab0aa7d1912b9cc093a4c4a96
|
[
"Apache-2.0"
] | null | null | null |
diazotheme/bootstrap/browser/interfaces.py
|
CMcStone/diazotheme.bootstrap
|
156b0f802400e99ab0aa7d1912b9cc093a4c4a96
|
[
"Apache-2.0"
] | null | null | null |
diazotheme/bootstrap/browser/interfaces.py
|
CMcStone/diazotheme.bootstrap
|
156b0f802400e99ab0aa7d1912b9cc093a4c4a96
|
[
"Apache-2.0"
] | null | null | null |
class ITopBanner(Interface):
    """Marker interface for the front-page top banner; declares no attributes or methods."""
class IThemeSpecific(Interface):
    """Marker interface for the theme layer; declares no attributes or methods."""
| 29.4
| 42
| 0.727891
| 16
| 147
| 6.6875
| 0.625
| 0.280374
| 0.448598
| 0.504673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14966
| 147
| 5
| 42
| 29.4
| 0.856
| 0.435374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
faa34a1338bab490be1426b717eef68d8e24bc44
| 163
|
py
|
Python
|
muDIC/IO/__init__.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 70
|
2019-04-15T08:08:23.000Z
|
2022-03-23T08:24:25.000Z
|
muDIC/IO/__init__.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 34
|
2019-05-03T18:09:43.000Z
|
2022-02-10T11:36:29.000Z
|
muDIC/IO/__init__.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 37
|
2019-04-25T15:39:23.000Z
|
2022-03-28T21:40:24.000Z
|
from __future__ import absolute_import
from .image_stack import image_stack_from_folder, ImageStack, image_stack_from_list
from .readWriteUtils import save, load
| 32.6
| 83
| 0.871166
| 23
| 163
| 5.652174
| 0.521739
| 0.230769
| 0.215385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09816
| 163
| 4
| 84
| 40.75
| 0.884354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
faadb20149467d67ebd525dbc83feae5cd6ef545
| 104
|
py
|
Python
|
agnes/algos/__init__.py
|
rotinov/CITUS
|
3d58794cfd5abf0b4f8b8eeb420af161c58de44e
|
[
"MIT"
] | 24
|
2019-09-26T09:53:56.000Z
|
2021-11-04T02:31:41.000Z
|
agnes/algos/__init__.py
|
rotinov/CITUS
|
3d58794cfd5abf0b4f8b8eeb420af161c58de44e
|
[
"MIT"
] | 2
|
2019-09-23T07:24:01.000Z
|
2019-09-23T18:24:05.000Z
|
agnes/algos/__init__.py
|
rotinov/AGNES
|
3d58794cfd5abf0b4f8b8eeb420af161c58de44e
|
[
"MIT"
] | null | null | null |
from agnes.algos.a2c import A2C
from agnes.algos.ppo import PPO
from agnes.algos.ppo_rnd import PPORND
| 20.8
| 38
| 0.817308
| 19
| 104
| 4.421053
| 0.421053
| 0.321429
| 0.5
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.125
| 104
| 4
| 39
| 26
| 0.901099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
87abcedc34d1433fbf100e35f0170c56263ad892
| 161
|
py
|
Python
|
rhobot/components/storage/payload/__init__.py
|
rerobins/rhobot_framework
|
f97d1cedc929387f69448e41346a0d15fe202eef
|
[
"BSD-3-Clause"
] | null | null | null |
rhobot/components/storage/payload/__init__.py
|
rerobins/rhobot_framework
|
f97d1cedc929387f69448e41346a0d15fe202eef
|
[
"BSD-3-Clause"
] | null | null | null |
rhobot/components/storage/payload/__init__.py
|
rerobins/rhobot_framework
|
f97d1cedc929387f69448e41346a0d15fe202eef
|
[
"BSD-3-Clause"
] | null | null | null |
from rhobot.components.storage.payload.storage import StoragePayload
from rhobot.components.storage.payload.result import ResultPayload, ResultCollectionPayload
| 53.666667
| 91
| 0.888199
| 17
| 161
| 8.411765
| 0.588235
| 0.13986
| 0.27972
| 0.377622
| 0.475524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055901
| 161
| 2
| 92
| 80.5
| 0.940789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
87f5683797a1fcdc0520cb9c858a21f68ed8a89a
| 76
|
py
|
Python
|
isle/_config.py
|
dmkskn/isle
|
81397e6e8c75543f9fd2efd2c34928077542da2a
|
[
"MIT"
] | null | null | null |
isle/_config.py
|
dmkskn/isle
|
81397e6e8c75543f9fd2efd2c34928077542da2a
|
[
"MIT"
] | null | null | null |
isle/_config.py
|
dmkskn/isle
|
81397e6e8c75543f9fd2efd2c34928077542da2a
|
[
"MIT"
] | null | null | null |
def tmdb_api_key():
    """Return the package-level ``TMDB_API_KEY`` value.

    The import happens inside the function (lazily) so the returned value
    reflects the package attribute at call time.
    """
    from . import TMDB_API_KEY as key
    return key
| 15.2
| 30
| 0.723684
| 13
| 76
| 3.769231
| 0.538462
| 0.428571
| 0.612245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223684
| 76
| 4
| 31
| 19
| 0.830508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
35644935240481a217866ca9f60abae64c26357f
| 16,546
|
py
|
Python
|
src/openprocurement/tender/openua/tests/post_blanks.py
|
pontostroy/openprocurement.api
|
6651ef29413d155c83f893ee64a611cf75f4daaf
|
[
"Apache-2.0"
] | null | null | null |
src/openprocurement/tender/openua/tests/post_blanks.py
|
pontostroy/openprocurement.api
|
6651ef29413d155c83f893ee64a611cf75f4daaf
|
[
"Apache-2.0"
] | null | null | null |
src/openprocurement/tender/openua/tests/post_blanks.py
|
pontostroy/openprocurement.api
|
6651ef29413d155c83f893ee64a611cf75f4daaf
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
import mock
from openprocurement.api.utils import get_now
from openprocurement.tender.core.tests.base import change_auth
# Patch values for the RELEASE_2020_04_19 feature-flag date:
# "ENABLED" puts the release date in the past (post-release rules active),
# "DISABLED" puts it in the future (pre-release behaviour).
RELEASE_2020_04_19_TEST_ENABLED = get_now() - timedelta(days=1)
RELEASE_2020_04_19_TEST_DISABLED = get_now() + timedelta(days=1)
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_DISABLED)
def create_complaint_post_release_forbidden(self):
    """Before the 2020-04-19 release date, posting to a complaint is forbidden outright."""
    payload = {
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "complaint_owner",
    }
    # reviewer tries to add a post while the complaint is still a draft
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        resp = self.post_post(payload, status=403)
    self.assertEqual(resp.status, "403 Forbidden")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(resp.json["errors"][0]["description"], "Forbidden")
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def create_complaint_post_status_forbidden(self):
    """After the release date, posting to a complaint in draft status is still rejected."""
    payload = {
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "complaint_owner",
    }
    # reviewer tries to add a post while the complaint is still a draft
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        resp = self.post_post(payload, status=403)
    self.assertEqual(resp.status, "403 Forbidden")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(
        resp.json["errors"][0]["description"],
        "Can't add post in current (draft) complaint status"
    )
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def create_complaint_post_claim_forbidden(self):
    """Posts are not allowed while the complaint is in the claim status."""
    # turn the complaint into a claim first
    resp = self.post_claim()
    self.assertEqual(resp.status, "201 Created")
    self.assertEqual(resp.json["data"]["status"], "claim")
    payload = {
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "complaint_owner",
    }
    # reviewer tries to add a post to the claim
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        resp = self.post_post(payload, status=403)
    self.assertEqual(resp.status, "403 Forbidden")
    self.assertEqual(resp.content_type, "application/json")
    self.assertEqual(
        resp.json["errors"][0]["description"],
        "Can't add post in current (claim) complaint status"
    )
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def create_complaint_post_complaint_owner(self):
    """A reviewer can post to a pending complaint and the complaint owner
    can answer that post via ``relatedPost``."""
    # switch the complaint to type "complaint" in status "pending"
    response = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "pending")
    # create post (with an attached document) by reviewer, addressed to the complaint owner
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "complaint_owner",
            "documents": [{
                "title": "lorem.doc",
                "url": self.generate_docservice_url(),
                "hash": "md5:" + "0" * 32,
                "format": "application/msword",
            }],
        })
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "aboveThresholdReviewers")
    post = response.json["data"]
    # create answer by complaint owner, linked to the reviewer's post
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
        "relatedPost": post["id"],
    }, acc_token=self.complaint_owner_token)
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "complaint_owner")
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def create_complaint_post_tender_owner(self):
    """A reviewer can post to a pending complaint and the tender owner
    can answer that post via ``relatedPost``."""
    # switch the complaint to type "complaint" in status "pending"
    response = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "pending")
    # create post (with an attached document) by reviewer, addressed to the tender owner
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "tender_owner",
            "documents": [{
                "title": "lorem.doc",
                "url": self.generate_docservice_url(),
                "hash": "md5:" + "0" * 32,
                "format": "application/msword",
            }],
        })
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "aboveThresholdReviewers")
    post = response.json["data"]
    # create answer by tender owner, linked to the reviewer's post
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
        "relatedPost": post["id"],
    }, acc_token=self.tender_token)
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "tender_owner")
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def create_complaint_post_validate_recipient(self):
    """Recipient validation: a reviewer's post may only target the complaint or
    tender owner, and an answer may only target the reviewers."""
    # switch the complaint to type "complaint" in status "pending"
    response = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "pending")
    # reviewer post with invalid recipient (reviewers can't address themselves)
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "aboveThresholdReviewers",
        }, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("Value must be one of ['complaint_owner', 'tender_owner'].", str(response.json["errors"]))
    # create a valid post by reviewer
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "complaint_owner",
        })
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "aboveThresholdReviewers")
    post = response.json["data"]
    # answer by complaint owner with invalid recipient (answers must target the reviewers)
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "complaint_owner",
        "relatedPost": post["id"]
    }, acc_token=self.complaint_owner_token, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("Value must be one of ['aboveThresholdReviewers'].", str(response.json["errors"]))
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def create_complaint_post_validate_related_post(self):
    """``relatedPost`` validation: wrong recipient of the referenced post,
    same author, unknown id, missing field, and duplicate references."""
    # switch the complaint to type "complaint" in status "pending"
    response = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "pending")
    # create post by reviewer, addressed to the tender owner
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "tender_owner",
        })
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "aboveThresholdReviewers")
    post = response.json["data"]
    # answer by complaint owner to a post that was addressed to tender_owner
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
        "relatedPost": post["id"]
    }, acc_token=self.complaint_owner_token, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("relatedPost invalid recipient.", str(response.json["errors"]))
    # reviewer answering their own post: same-author error
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "aboveThresholdReviewers",
            "relatedPost": post["id"]
        }, acc_token=self.complaint_owner_token, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("relatedPost can't have the same author.", str(response.json["errors"]))
    # relatedPost pointing at an id that is not one of the complaint's posts
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
        "relatedPost": "some_id"
    }, acc_token=self.complaint_owner_token, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("relatedPost should be one of posts.", str(response.json["errors"]))
    # answer by tender owner without relatedPost: the field is required for answers
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
    }, acc_token=self.tender_token, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("This field is required.", str(response.json["errors"]))
    # valid answer by tender owner
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
        "relatedPost": post["id"],
    }, acc_token=self.tender_token)
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["author"], "tender_owner")
    # second answer referencing the same post: relatedPost must be unique
    response = self.post_post({
        "title": "Lorem ipsum",
        "description": "Lorem ipsum dolor sit amet",
        "recipient": "aboveThresholdReviewers",
        "relatedPost": post["id"],
    }, acc_token=self.tender_token, status=422)
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertIn("relatedPost must be unique.", str(response.json["errors"]))
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def patch_complaint_post(self):
    """Posts are immutable: a PATCH on an existing post returns 405."""
    # switch the complaint to type "complaint" in status "pending"
    resp = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(resp.status, "200 OK")
    self.assertEqual(resp.json["data"]["status"], "pending")
    # create a post by reviewer
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        resp = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "complaint_owner",
        })
    self.assertEqual(resp.status, "201 Created")
    self.assertEqual(resp.content_type, "application/json")
    self.post_id = resp.json["data"]["id"]
    # attempting to patch the post must be rejected with Method Not Allowed
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        resp = self.patch_post({"title": "Test"}, status=405)
    self.assertEqual(resp.status, "405 Method Not Allowed")
    self.assertEqual(resp.content_type, "text/plain")
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def get_complaint_post(self):
    """GET of a single post returns the expected field set; an unknown
    post id yields a structured 404 error."""
    # switch the complaint to type "complaint" in status "pending"
    response = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "pending")
    # create post (with an attached document) by reviewer
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "complaint_owner",
            "documents": [{
                "title": "lorem.doc",
                "url": self.generate_docservice_url(),
                "hash": "md5:" + "0" * 32,
                "format": "application/msword",
            }],
        })
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    post = response.json["data"]
    self.post_id = post["id"]
    # fetch the post and check the returned field set
    response = self.get_post()
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        set(response.json["data"]),
        set(["id", "title", "description", "author", "recipient", "datePublished", "documents"])
    )
    # fetching a non-existent post id returns a structured 404
    self.post_id = "some_id"
    response = self.get_post(status=404)
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"],
        [{
            "description": "Not Found",
            "location": "url",
            "name": "post_id"
        }]
    )
@mock.patch("openprocurement.tender.openua.validation.RELEASE_2020_04_19", RELEASE_2020_04_19_TEST_ENABLED)
def get_complaint_posts(self):
    """GET of the post collection returns items with the expected field set."""
    # switch the complaint to type "complaint" in status "pending"
    response = self.patch_complaint({"type": "complaint", "status": "pending"}, self.complaint_owner_token)
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["status"], "pending")
    # create post (with an attached document) by reviewer
    with change_auth(self.app, ("Basic", ("reviewer", ""))):
        response = self.post_post({
            "title": "Lorem ipsum",
            "description": "Lorem ipsum dolor sit amet",
            "recipient": "complaint_owner",
            "documents": [{
                "title": "lorem.doc",
                "url": self.generate_docservice_url(),
                "hash": "md5:" + "0" * 32,
                "format": "application/msword",
            }],
        })
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    post = response.json["data"]
    # list the posts and check the first item's field set
    response = self.get_posts()
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        set(response.json["data"][0]),
        set(["id", "title", "description", "author", "recipient", "datePublished", "documents"])
    )
| 42.101781
| 108
| 0.659676
| 1,810
| 16,546
| 5.883978
| 0.070718
| 0.119718
| 0.179249
| 0.087136
| 0.935962
| 0.918685
| 0.911643
| 0.911643
| 0.900751
| 0.886761
| 0
| 0.024833
| 0.196845
| 16,546
| 392
| 109
| 42.209184
| 0.776582
| 0.0553
| 0
| 0.815534
| 0
| 0
| 0.306775
| 0.058778
| 0
| 0
| 0
| 0
| 0.297735
| 1
| 0.032362
| false
| 0
| 0.012945
| 0
| 0.045307
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35c8a8cc37e213870c935fb037ff8af8130fb3c1
| 3,407
|
py
|
Python
|
python/079_Word_Search.py
|
JerryCatLeung/leetcode
|
ac33a66d7754810ca40fc4fd595b633d30d9afc4
|
[
"Apache-2.0"
] | null | null | null |
python/079_Word_Search.py
|
JerryCatLeung/leetcode
|
ac33a66d7754810ca40fc4fd595b633d30d9afc4
|
[
"Apache-2.0"
] | null | null | null |
python/079_Word_Search.py
|
JerryCatLeung/leetcode
|
ac33a66d7754810ca40fc4fd595b633d30d9afc4
|
[
"Apache-2.0"
] | null | null | null |
class Solution(object):
    def exist(self, board, word):
        """Return True if `word` can be traced through `board` by moving
        between horizontally/vertically adjacent cells, using each cell
        at most once (LeetCode 79, backtracking DFS).

        :type board: List[List[str]] (rows of single characters; strings work too)
        :type word: str
        :rtype: bool
        """
        if not word:
            return True   # empty word is trivially present
        if not board or not board[0]:
            return False  # nothing to search in (original crashed here)
        rows, cols = len(board), len(board[0])
        # check_board[i][j] is True while cell (i, j) is still available.
        # (The original also tested `and check_board`, which is always truthy
        # for a non-empty list and therefore did nothing -- removed.)
        check_board = [[True] * cols for _ in range(rows)]
        for i in range(rows):
            for j in range(cols):
                if board[i][j] == word[0]:
                    check_board[i][j] = False
                    if self.check_exist(check_board, board, word, 1, len(word), i, j):
                        return True
                    check_board[i][j] = True  # backtrack: free the start cell
        return False

    def check_exist(self, check_board, board, word, index, ls, row, col):
        """DFS helper: try to match word[index:] starting from the four
        neighbours of (row, col); `ls` is len(word), threaded through so it
        is not recomputed on every recursive call."""
        if index == ls:
            return True
        for d_row, d_col in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            curr_row = row + d_row
            curr_col = col + d_col
            if 0 <= curr_row < len(board) and 0 <= curr_col < len(board[0]):
                if check_board[curr_row][curr_col] and board[curr_row][curr_col] == word[index]:
                    check_board[curr_row][curr_col] = False
                    if self.check_exist(check_board, board, word, index + 1, ls, curr_row, curr_col):
                        return True
                    check_board[curr_row][curr_col] = True  # backtrack
        return False
if __name__ == "__main__":
    # Worst-case smoke test: a 30x30 board of 'a' with a single 'b' in the
    # bottom-right corner, searched for a long word starting at that 'b'.
    # Equivalent to the original unreadable hard-coded literals.
    board = ["a" * 30] * 29 + ["a" * 29 + "b"]
    word = "b" + "a" * 903
    s = Solution()
    # print() call form runs under both Python 2 and Python 3
    print(s.exist(board, word))
| 87.358974
| 1,914
| 0.744056
| 227
| 3,407
| 11
| 0.180617
| 0.672807
| 0.973168
| 1.249499
| 0.443732
| 0.410092
| 0.381258
| 0.381258
| 0.381258
| 0.348418
| 0
| 0.006399
| 0.174347
| 3,407
| 39
| 1,914
| 87.358974
| 0.881266
| 0
| 0
| 0.206897
| 0
| 0
| 0.552398
| 0.549954
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.034483
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35cf49f3bbec9b0bbae3bd259f933b4da1062f13
| 11,426
|
py
|
Python
|
lenets.py
|
Chunhai-Yu/CarND-Traffic-Sign-Classifier
|
db297eb5f7d7036d3078901dd9e7218afd80c0d9
|
[
"MIT"
] | 1
|
2021-03-24T12:53:49.000Z
|
2021-03-24T12:53:49.000Z
|
lenets.py
|
Chunhai-Yu/CarND-Traffic-Sign-Classifier
|
db297eb5f7d7036d3078901dd9e7218afd80c0d9
|
[
"MIT"
] | null | null | null |
lenets.py
|
Chunhai-Yu/CarND-Traffic-Sign-Classifier
|
db297eb5f7d7036d3078901dd9e7218afd80c0d9
|
[
"MIT"
] | 1
|
2021-03-12T07:19:17.000Z
|
2021-03-12T07:19:17.000Z
|
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import flatten
def LeNet(x, keep_prob_conv, keep_prob_fc):
    """Classic LeNet-5 style classifier producing 43-class logits.

    Args:
        x: batch of RGB images (converted to grayscale here; the layer
           comments assume 32x32 input -- TODO confirm against the caller).
        keep_prob_conv: keep probability passed to tf.nn.dropout after each conv layer.
        keep_prob_fc: keep probability passed to tf.nn.dropout after each FC layer.
    Returns:
        logits tensor with 43 outputs per example.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # Change RGB to Gray
    x = tf.image.rgb_to_grayscale(x)
    # normalize the data (per-image standardization)
    x = tf.map_fn(lambda image: tf.image.per_image_standardization(image), x)
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    weight_c1 = tf.Variable(tf.truncated_normal([5,5,1,6], mean=mu, stddev=sigma))
    biases_c1 = tf.Variable(tf.zeros([6]))
    conv1 = tf.nn.conv2d(x, weight_c1, strides=[1,1,1,1], padding='VALID')
    conv1 = tf.nn.bias_add(conv1, biases_c1)
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Dropout
    conv1 = tf.nn.dropout(conv1, keep_prob_conv)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Layer 2: Convolutional. Output = 10x10x16.
    weight_c2 = tf.Variable(tf.truncated_normal([5,5,6,16], mean=mu, stddev=sigma))
    biases_c2 = tf.Variable(tf.zeros([16]))
    conv2 = tf.nn.conv2d(conv1, weight_c2, strides=[1,1,1,1], padding='VALID')
    conv2 = tf.nn.bias_add(conv2, biases_c2)
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Dropout
    conv2 = tf.nn.dropout(conv2, keep_prob_conv)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Flatten. Input = 5x5x16. Output = 400.
    conv2_flat = flatten(conv2)
    # Layer 3: Fully Connected. Input = 400. Output = 120.
    weights_3 = tf.Variable(tf.truncated_normal([400, 120], mean=mu, stddev=sigma))
    biases_3 = tf.Variable(tf.zeros([120]))
    fc1 = tf.add(tf.matmul(conv2_flat, weights_3), biases_3)
    # Activation.
    fc1 = tf.nn.relu(fc1)
    # Dropout
    fc1 = tf.nn.dropout(fc1, keep_prob_fc)
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    weights_4 = tf.Variable(tf.truncated_normal([120, 84], mean=mu, stddev=sigma))
    biases_4 = tf.Variable(tf.zeros([84]))
    fc2 = tf.add(tf.matmul(fc1, weights_4), biases_4)
    # Activation.
    fc2 = tf.nn.relu(fc2)
    # Dropout
    fc2 = tf.nn.dropout(fc2, keep_prob_fc)
    # Layer 5: Fully Connected. Input = 84. Output = 43.
    weights_5 = tf.Variable(tf.truncated_normal([84,43], mean=mu, stddev=sigma))
    biases_5 = tf.Variable(tf.zeros([43]))
    logits = tf.add(tf.matmul(fc2, weights_5), biases_5)
    return logits
def LeNet_4x(x, keep_prob_conv, keep_prob_fc):
    """LeNet variant with roughly 4x wider hidden layers (24/64 conv filters,
    480/336 FC units) producing 43-class logits.

    Several of the original layer comments were copied unchanged from LeNet;
    they are corrected below to the actual dimensions.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # Change RGB to Gray
    x = tf.image.rgb_to_grayscale(x)
    # normalize the data
    x = tf.map_fn(lambda image: tf.image.per_image_standardization(image), x)
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x24.
    weight_c1 = tf.Variable(tf.truncated_normal([5,5,1,24], mean=mu, stddev=sigma))
    biases_c1 = tf.Variable(tf.zeros([24]))
    conv1 = tf.nn.conv2d(x, weight_c1, strides=[1,1,1,1], padding='VALID')
    conv1 = tf.nn.bias_add(conv1, biases_c1)
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Dropout
    conv1 = tf.nn.dropout(conv1, keep_prob_conv)
    # Pooling. Input = 28x28x24. Output = 14x14x24.
    conv1 = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Layer 2: Convolutional. Output = 10x10x64.
    weight_c2 = tf.Variable(tf.truncated_normal([5,5,24,64], mean=mu, stddev=sigma))
    biases_c2 = tf.Variable(tf.zeros([64]))
    conv2 = tf.nn.conv2d(conv1, weight_c2, strides=[1,1,1,1], padding='VALID')
    conv2 = tf.nn.bias_add(conv2, biases_c2)
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Dropout
    conv2 = tf.nn.dropout(conv2, keep_prob_conv)
    # Pooling. Input = 10x10x64. Output = 5x5x64.
    conv2 = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Flatten. Input = 5x5x64. Output = 1600.
    conv2_flat = flatten(conv2)
    # Layer 3: Fully Connected. Input = 1600. Output = 480.
    weights_3 = tf.Variable(tf.truncated_normal([1600, 480], mean=mu, stddev=sigma))
    biases_3 = tf.Variable(tf.zeros([480]))
    fc1 = tf.add(tf.matmul(conv2_flat, weights_3), biases_3)
    # Activation.
    fc1 = tf.nn.relu(fc1)
    # Dropout
    fc1 = tf.nn.dropout(fc1, keep_prob_fc)
    # Layer 4: Fully Connected. Input = 480. Output = 336.
    weights_4 = tf.Variable(tf.truncated_normal([480, 336], mean=mu, stddev=sigma))
    biases_4 = tf.Variable(tf.zeros([336]))
    fc2 = tf.add(tf.matmul(fc1, weights_4), biases_4)
    # Activation.
    fc2 = tf.nn.relu(fc2)
    # Dropout
    fc2 = tf.nn.dropout(fc2, keep_prob_fc)
    # Layer 5: Fully Connected. Input = 336. Output = 43.
    weights_5 = tf.Variable(tf.truncated_normal([336,43], mean=mu, stddev=sigma))
    biases_5 = tf.Variable(tf.zeros([43]))
    logits = tf.add(tf.matmul(fc2, weights_5), biases_5)
    return logits
def LeNet_4x_MS(x, keep_prob_conv, keep_prob_fc):
    '''
    LeNet with the hidden layers expanded 4x and multi-scale features:
    the flattened conv1 output is concatenated with conv2's and fed into fc1.
    Note: unlike the other variants, conv2 here has no dropout.
    '''
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # Change RGB to Gray
    x = tf.image.rgb_to_grayscale(x)
    # normalize the data
    x = tf.map_fn(lambda image: tf.image.per_image_standardization(image), x)
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x24.
    weight_c1 = tf.Variable(tf.truncated_normal([5,5,1,24], mean=mu, stddev=sigma))
    biases_c1 = tf.Variable(tf.zeros([24]))
    conv1 = tf.nn.conv2d(x, weight_c1, strides=[1,1,1,1], padding='VALID')
    conv1 = tf.nn.bias_add(conv1, biases_c1)
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Dropout
    conv1 = tf.nn.dropout(conv1, keep_prob_conv)
    # Pooling. Input = 28x28x24. Output = 14x14x24.
    conv1 = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # flatten conv1 so it can be fed to the classifier as well
    conv1_flat = flatten(conv1) # 14*14*24 = 4704
    # Layer 2: Convolutional. Input 14*14*24 Output = 10x10x64.
    weight_c2 = tf.Variable(tf.truncated_normal([5,5,24,64], mean=mu, stddev=sigma))
    biases_c2 = tf.Variable(tf.zeros([64]))
    conv2 = tf.nn.conv2d(conv1, weight_c2, strides=[1,1,1,1], padding='VALID')
    conv2 = tf.nn.bias_add(conv2, biases_c2)
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x64. Output = 5x5x64.
    conv2 = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Flatten. Input = 5x5x64. Output = 1600.
    conv2_flat = flatten(conv2) #
    # combine conv1/conv2 features (multi-scale input to the classifier)
    conv_flat = tf.concat([conv1_flat, conv2_flat],1) #(6304=4704+1600)
    # Layer 3: Fully Connected. Input = 6304. Output = 480.
    weights_3 = tf.Variable(tf.truncated_normal([6304, 480], mean=mu, stddev=sigma))
    biases_3 = tf.Variable(tf.zeros([480]))
    fc1 = tf.add(tf.matmul(conv_flat, weights_3), biases_3)
    # Activation.
    fc1 = tf.nn.relu(fc1)
    # Dropout
    fc1 = tf.nn.dropout(fc1, keep_prob_fc)
    # Layer 4: Fully Connected. Input = 480. Output = 336.
    weights_4 = tf.Variable(tf.truncated_normal([480, 336], mean=mu, stddev=sigma))
    biases_4 = tf.Variable(tf.zeros([336]))
    fc2 = tf.add(tf.matmul(fc1, weights_4), biases_4)
    # Activation.
    fc2 = tf.nn.relu(fc2)
    # Dropout
    fc2 = tf.nn.dropout(fc2, keep_prob_fc)
    # Layer 5: Fully Connected. Input = 336. Output = 43.
    weights_5 = tf.Variable(tf.truncated_normal([336,43], mean=mu, stddev=sigma))
    biases_5 = tf.Variable(tf.zeros([43]))
    logits = tf.add(tf.matmul(fc2, weights_5), biases_5)
    return logits
def network(x, keep_prob_conv, keep_prob_fc):
    """
    a network according to the paper below: three SAME-padded conv stages
    whose flattened outputs are all concatenated into the classifier
    http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # Change RGB to Gray
    x = tf.image.rgb_to_grayscale(x)
    # normalize the data
    x = tf.map_fn(lambda image: tf.image.per_image_standardization(image), x)
    # Layer 1: Convolutional. Input = 32x32x1. Output = 32x32x30.
    weight_c1 = tf.Variable(tf.truncated_normal([5,5,1,30], mean=mu, stddev=sigma))
    biases_c1 = tf.Variable(tf.zeros([30]))
    conv1 = tf.nn.conv2d(x, weight_c1, strides=[1,1,1,1], padding='SAME')
    conv1 = tf.nn.bias_add(conv1, biases_c1)
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Dropout
    conv1 = tf.nn.dropout(conv1, keep_prob_conv)
    # Pooling. Input = 32x32x30. Output = 16x16x30.
    conv1 = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Flatten. Input = 16x16x30. Output = 7680.
    conv1_flat = flatten(conv1)
    # Layer 2: Convolutional. Input 16X16X30, Output = 16x16x15.
    weight_c2 = tf.Variable(tf.truncated_normal([5,5,30,15], mean=mu, stddev=sigma))
    biases_c2 = tf.Variable(tf.zeros([15]))
    conv2 = tf.nn.conv2d(conv1, weight_c2, strides=[1,1,1,1], padding='SAME')
    conv2 = tf.nn.bias_add(conv2, biases_c2)
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Dropout
    conv2 = tf.nn.dropout(conv2, keep_prob_conv)
    # Pooling. Input = 16x16x15. Output = 8x8x15.
    conv2 = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Flatten. Input = 8x8x15. Output = 960.
    conv2_flat = flatten(conv2)
    # Layer 3: Convolutional, Input=8x8x15, Output=8x8x10
    weight_c3 = tf.Variable(tf.truncated_normal([5,5,15,10], mean=mu, stddev=sigma))
    biases_c3 = tf.Variable(tf.zeros([10]))
    conv3 = tf.nn.conv2d(conv2, weight_c3, strides=[1,1,1,1], padding='SAME')
    conv3 = tf.nn.bias_add(conv3, biases_c3)
    # Activation
    conv3 = tf.nn.relu(conv3)
    # Dropout
    conv3 = tf.nn.dropout(conv3, keep_prob_conv)
    # Pooling. Input=8x8x10, Output=4x4x10
    conv3 = tf.nn.max_pool(conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    # Flatten Input=4x4x10, output = 160
    conv3_flat = flatten(conv3)
    # combine conv1/conv2/conv3 features (multi-scale input to the classifier)
    conv_flat = tf.concat([conv1_flat, conv2_flat, conv3_flat],1) #(8800=7680+960+160)
    # Layer 4: Fully Connected. Input = 8800. Output = 960.
    weights_3 = tf.Variable(tf.truncated_normal([8800, 960], mean=mu, stddev=sigma))
    biases_3 = tf.Variable(tf.zeros([960]))
    fc1 = tf.add(tf.matmul(conv_flat, weights_3), biases_3)
    # Activation.
    fc1 = tf.nn.relu(fc1)
    # Dropout
    fc1 = tf.nn.dropout(fc1, keep_prob_fc)
    # Layer 5: Fully Connected. Input = 960. Output = 336
    weights_4 = tf.Variable(tf.truncated_normal([960, 336], mean=mu, stddev=sigma))
    biases_4 = tf.Variable(tf.zeros([336]))
    fc2 = tf.add(tf.matmul(fc1, weights_4), biases_4)
    # Activation.
    fc2 = tf.nn.relu(fc2)
    # Dropout
    fc2 = tf.nn.dropout(fc2, keep_prob_fc)
    # Layer 6: Fully Connected. Input = 336 Output = 43.
    weights_5 = tf.Variable(tf.truncated_normal([336,43], mean=mu, stddev=sigma))
    biases_5 = tf.Variable(tf.zeros([43]))
    logits = tf.add(tf.matmul(fc2, weights_5), biases_5)
    return logits
| 40.51773
| 114
| 0.653597
| 1,759
| 11,426
| 4.122797
| 0.08755
| 0.033094
| 0.069498
| 0.060811
| 0.858246
| 0.84487
| 0.841147
| 0.81219
| 0.800055
| 0.788748
| 0
| 0.097508
| 0.19937
| 11,426
| 282
| 115
| 40.517731
| 0.695234
| 0.259321
| 0
| 0.748344
| 0
| 0
| 0.00935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02649
| false
| 0
| 0.02649
| 0
| 0.07947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35d0023b71963da7cd0849279246d1d2da3a31fb
| 28,984
|
py
|
Python
|
tests/datasource/test_new_datasource_with_runtime_data_connector.py
|
veatch/great_expectations
|
8500468618fc7293d600a3660d830c9fc23ccbf8
|
[
"Apache-2.0"
] | null | null | null |
tests/datasource/test_new_datasource_with_runtime_data_connector.py
|
veatch/great_expectations
|
8500468618fc7293d600a3660d830c9fc23ccbf8
|
[
"Apache-2.0"
] | null | null | null |
tests/datasource/test_new_datasource_with_runtime_data_connector.py
|
veatch/great_expectations
|
8500468618fc7293d600a3660d830c9fc23ccbf8
|
[
"Apache-2.0"
] | null | null | null |
import os
from typing import Dict, List
import pandas as pd
import pytest
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
IDDict,
RuntimeBatchRequest,
)
from great_expectations.data_context.util import (
file_relative_path,
instantiate_class_from_config,
)
from great_expectations.datasource.new_datasource import Datasource
# Shared ruamel.yaml parser used to load the datasource configs below.
yaml = YAML()
@pytest.fixture
def basic_datasource_with_runtime_data_connector():
    """Datasource fixture: pandas execution engine plus a single
    RuntimeDataConnector with three batch identifiers."""
    datasource_config = f"""
    class_name: Datasource
    execution_engine:
        class_name: PandasExecutionEngine
    data_connectors:
        test_runtime_data_connector:
            module_name: great_expectations.datasource.data_connector
            class_name: RuntimeDataConnector
            batch_identifiers:
                - pipeline_stage_name
                - airflow_run_id
                - custom_key_0
    """
    return instantiate_class_from_config(
        yaml.load(datasource_config),
        runtime_environment={"name": "my_datasource"},
        config_defaults={"module_name": "great_expectations.datasource"},
    )
def test_basic_datasource_runtime_data_connector_self_check(
    basic_datasource_with_runtime_data_connector,
):
    """self_check() reports the Pandas engine and an as-yet-empty RuntimeDataConnector."""
    expected_report = {
        "data_connectors": {
            "count": 1,
            "test_runtime_data_connector": {
                "class_name": "RuntimeDataConnector",
                "data_asset_count": 0,
                "data_assets": {},
                "example_data_asset_names": [],
                "example_unmatched_data_references": [],
                "note": "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest",
                "unmatched_data_reference_count": 0,
            },
        },
        "execution_engine": {
            "boto3_options": {},
            "azure_options": {},
            "caching": True,
            "class_name": "PandasExecutionEngine",
            "discard_subset_failing_expectations": False,
            "module_name": "great_expectations.execution_engine.pandas_execution_engine",
        },
    }
    assert basic_datasource_with_runtime_data_connector.self_check() == expected_report
def test_basic_datasource_runtime_data_connector_error_checking_unknown_datasource(
    basic_datasource_with_runtime_data_connector,
):
    """A batch request that names a different datasource is rejected."""
    datasource = basic_datasource_with_runtime_data_connector
    with pytest.raises(ValueError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name="non_existent_datasource",
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
            )
        )
def test_basic_datasource_runtime_data_connector_error_checking_unknown_dataconnector(
    basic_datasource_with_runtime_data_connector,
):
    """A batch request that names an unconfigured data_connector is rejected."""
    datasource = basic_datasource_with_runtime_data_connector
    with pytest.raises(ValueError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="non_existent_data_connector",
                data_asset_name="my_data_asset",
            )
        )
def test_basic_datasource_runtime_data_connector_error_checking_no_batch_idenfitiers(
    basic_datasource_with_runtime_data_connector,
):
    """batch_identifiers=None is illegal when batch_data is supplied."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": df},
                batch_identifiers=None,
            )
        )
def test_basic_datasource_runtime_data_connector_error_checking_incorrect_batch_idenfitiers(
    basic_datasource_with_runtime_data_connector,
):
    """An empty (falsy) batch_identifiers dict is illegal when batch_data is supplied."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": df},
                batch_identifiers={},
            )
        )
#########################################
# Tests with data passed in as batch_data
#########################################
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present(
    basic_datasource_with_runtime_data_connector,
):
    """Every configured batch_identifier key is accepted; one batch results."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="IN_MEMORY_DATA_ASSET",
        runtime_parameters={"batch_data": df},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list = datasource.get_batch_list_from_batch_request(batch_request=request)
    assert len(batch_list) == 1
def test_batch_identifiers_and_batch_identifiers_error_mostly_legal_keys(
    basic_datasource_with_runtime_data_connector,
):
    """One undeclared key among otherwise-legal batch_identifiers is rejected."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # All keys declared in the connector configuration, plus one that is not.
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="IN_MEMORY_DATA_ASSET",
        runtime_parameters={"batch_data": df},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
            "i_am_illegal_key": "i_am_illegal_value",
        },
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(batch_request=request)
def test_batch_identifiers_and_batch_identifiers_error_one_illegal_key(
    basic_datasource_with_runtime_data_connector,
):
    """A lone undeclared batch_identifier key is rejected."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="IN_MEMORY_DATA_ASSET",
        runtime_parameters={"batch_data": df},
        batch_identifiers={"unknown_key": "some_value"},
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(batch_request=request)
def test_set_data_asset_name_for_runtime_data(
    basic_datasource_with_runtime_data_connector,
):
    """The request's data_asset_name is carried onto the resulting batch_definition."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_runtime_data_asset",
        runtime_parameters={"batch_data": df},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list = datasource.get_batch_list_from_batch_request(batch_request=request)
    assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_get_available_data_asset_names(basic_datasource_with_runtime_data_connector):
    """A RuntimeDataConnector starts out with no data assets."""
    # Dict requires both a key and a value type; the original annotation
    # Dict[List[str]] was malformed (and invalid if ever evaluated).
    expected_available_data_asset_names: Dict[str, List[str]] = {
        "test_runtime_data_connector": []
    }
    available_data_asset_names: Dict[
        str, List[str]
    ] = basic_datasource_with_runtime_data_connector.get_available_data_asset_names()
    assert available_data_asset_names == expected_available_data_asset_names
def test_get_batch_definition_list_from_batch_request_length_one(
    basic_datasource_with_runtime_data_connector,
):
    """A single in-memory request yields one batch wrapping exactly the given frame."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"batch_data": df},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    batch_list = datasource.get_batch_list_from_batch_request(batch_request=request)
    # Whole-batch equality is awkward because of batch_markers, so assert on the
    # pieces that uniquely identify the data instead.
    assert len(batch_list) == 1
    batch = batch_list[0]
    assert batch.batch_spec is not None
    assert batch.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(batch.data.dataframe, pd.DataFrame)
    assert batch.data.dataframe.shape == (2, 2)
    assert batch.data.dataframe["col2"].values[1] == 4
    assert (
        batch.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
def test_get_batch_with_pipeline_style_batch_request_missing_batch_identifiers_error(
    basic_datasource_with_runtime_data_connector,
):
    """batch_identifiers=None is rejected even when batch_data is supplied."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="test_asset_1",
        runtime_parameters={"batch_data": df},
        batch_identifiers=None,
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(batch_request=request)
def test_get_batch_definitions_and_get_batch_basics(
    basic_datasource_with_runtime_data_connector,
):
    """One definition matches a runtime request; get_batch_from_batch_definition works."""
    datasource = basic_datasource_with_runtime_data_connector
    df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="test_asset_1",
        runtime_parameters={"batch_data": df},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    definitions = datasource.get_available_batch_definitions(batch_request=request)
    assert len(definitions) == 1

    other_df = pd.DataFrame({"x": range(10), "y": range(10)})
    batch = datasource.get_batch_from_batch_definition(
        batch_definition=BatchDefinition(
            "my_datasource",
            "_pipeline",
            "_pipeline",
            batch_identifiers=IDDict({"some_random_id": 1}),
        ),
        batch_data=other_df,
    )
    # A batch built directly from a batch_definition carries no batch_request.
    assert batch.batch_request == {}
####################################
# Tests with data passed in as query
####################################
@pytest.fixture
def db_file():
    """Path to the shared SQLite fixture database for SQL data-connector tests."""
    relative_path = os.path.join(
        "..", "test_sets", "test_cases_for_sql_data_connector.db"
    )
    return file_relative_path(__file__, relative_path)
@pytest.fixture
def datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine(db_file, sa):
    """SQLite-backed Datasource holding a single RuntimeDataConnector."""
    datasource_config: str = f"""
    class_name: Datasource

    execution_engine:
        class_name: SqlAlchemyExecutionEngine
        connection_string: sqlite:///{db_file}

    data_connectors:
        test_runtime_data_connector:
            module_name: great_expectations.datasource.data_connector
            class_name: RuntimeDataConnector
            batch_identifiers:
                - pipeline_stage_name
                - airflow_run_id
                - custom_key_0
    """
    return instantiate_class_from_config(
        yaml.load(datasource_config),
        runtime_environment={"name": "my_datasource"},
        config_defaults={"module_name": "great_expectations.datasource"},
    )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_self_check(
    db_file, datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """self_check() reports the SQL engine and an as-yet-empty RuntimeDataConnector."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    expected_report = {
        "execution_engine": {
            "connection_string": f"sqlite:///{db_file}",
            "module_name": "great_expectations.execution_engine.sqlalchemy_execution_engine",
            "class_name": "SqlAlchemyExecutionEngine",
        },
        "data_connectors": {
            "count": 1,
            "test_runtime_data_connector": {
                "class_name": "RuntimeDataConnector",
                "data_asset_count": 0,
                "example_data_asset_names": [],
                "data_assets": {},
                "note": "RuntimeDataConnector will not have data_asset_names until they are passed in through RuntimeBatchRequest",
                "unmatched_data_reference_count": 0,
                "example_unmatched_data_references": [],
            },
        },
    }
    assert datasource.self_check() == expected_report
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_unknown_datasource(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A batch request that names a different datasource is rejected."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(ValueError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name="non_existent_datasource",
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
            )
        )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_unknown_dataconnector(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A batch request that names an unconfigured data_connector is rejected."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(ValueError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="non_existent_data_connector",
                data_asset_name="my_data_asset",
            )
        )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_no_batch_identifiers(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """batch_identifiers=None is illegal when a query is supplied."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"query": "SELECT * FROM table_full__I;"},
                batch_identifiers=None,
            )
        )
def test_datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine_illegal_batch_identifiers(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """An empty (falsy) batch_identifiers dict is illegal when a query is supplied."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"query": "SELECT * FROM table_full__I;"},
                batch_identifiers={},
            )
        )
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present_with_query(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """Every configured batch_identifier key is accepted for a query request."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list = datasource.get_batch_list_from_batch_request(batch_request=request)
    assert len(batch_list) == 1
def test_batch_identifiers_and_batch_identifiers_error_illegal_key_with_query_mostly_legal_keys(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """One undeclared key among otherwise-legal batch_identifiers is rejected (query)."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    # All keys declared in the connector configuration, plus one that is not.
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
            "i_am_illegal_key": "i_am_illegal_value",
        },
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(batch_request=request)
def test_batch_identifiers_and_batch_identifiers_error_illegal_key_with_query_single_illegal_key(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A lone undeclared batch_identifier key is rejected (query)."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="TEMP_QUERY_DATA_ASSET",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={"unknown_key": "some_value"},
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(batch_request=request)
def test_set_data_asset_name_for_runtime_query_data(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """The request's data_asset_name is carried onto the batch_definition (query)."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_runtime_data_asset",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batch_list = datasource.get_batch_list_from_batch_request(batch_request=request)
    assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_get_batch_definition_list_from_batch_request_length_one_from_query(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """A single query-backed request yields one batch whose data is a SQL selectable."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    batch_list = datasource.get_batch_list_from_batch_request(batch_request=request)
    # Whole-batch equality is awkward because of batch_markers, so assert on the
    # pieces that uniquely identify the data instead.
    assert len(batch_list) == 1
    batch = batch_list[0]
    assert batch.batch_spec is not None
    assert batch.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(batch.data.selectable, sqlalchemy.Table)
def test_get_batch_with_pipeline_style_batch_request_missing_batch_identifiers_error_from_query(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """batch_identifiers=None is rejected for a query-based runtime request.

    Renamed (``_from_query`` suffix, matching the neighboring query tests): the
    original reused the name
    ``test_get_batch_with_pipeline_style_batch_request_missing_batch_identifiers_error``,
    already defined earlier in this module for the Pandas engine, so this
    definition shadowed it and pytest never collected the Pandas variant.
    """
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="test_asset_1",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers=None,
    )
    with pytest.raises(ge_exceptions.DataConnectorError):
        datasource.get_batch_list_from_batch_request(batch_request=request)
def test_get_batch_definitions_and_get_batch_basics_from_query(
    datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine, sa
):
    """Exactly one batch definition matches a query-based runtime request."""
    datasource = datasource_with_runtime_data_connector_and_sqlalchemy_execution_engine
    request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="test_asset_1",
        runtime_parameters={"query": "SELECT * FROM table_full__I;"},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    definitions = datasource.get_available_batch_definitions(batch_request=request)
    assert len(definitions) == 1
| 37.788787
| 131
| 0.702836
| 3,217
| 28,984
| 5.813802
| 0.066211
| 0.100786
| 0.115489
| 0.104261
| 0.922152
| 0.909105
| 0.890927
| 0.885419
| 0.880394
| 0.865637
| 0
| 0.009939
| 0.218914
| 28,984
| 766
| 132
| 37.83812
| 0.816202
| 0.083736
| 0
| 0.710311
| 0
| 0
| 0.206458
| 0.060105
| 0
| 0
| 0
| 0
| 0.03437
| 1
| 0.045827
| false
| 0.003273
| 0.018003
| 0.001637
| 0.06874
| 0.001637
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea0e4fb02570efb7ed1eeccf76b9f0f6ce919673
| 33,459
|
py
|
Python
|
pytorch-a2c-ppo-acktr/main.py
|
mjsargent/gym-miniworld
|
79614f991f7bfc3428959e6e6b82461bc54bdd2e
|
[
"Apache-2.0"
] | null | null | null |
pytorch-a2c-ppo-acktr/main.py
|
mjsargent/gym-miniworld
|
79614f991f7bfc3428959e6e6b82461bc54bdd2e
|
[
"Apache-2.0"
] | null | null | null |
pytorch-a2c-ppo-acktr/main.py
|
mjsargent/gym-miniworld
|
79614f991f7bfc3428959e6e6b82461bc54bdd2e
|
[
"Apache-2.0"
] | null | null | null |
import copy
import glob
import os
import time
import types
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import algo
from arguments import get_args
from envs import make_vec_envs
from model import Policy, SFPolicy, QPolicy, SFConditionedPolicy
from storage import RolloutStorage
#from visualize import visdom_plot
import wandb
# Parse command-line arguments at import time; ``args`` is then read as a
# module-level global throughout this script.
args = get_args()

# Restrict to the algorithms this script knows how to build.
assert args.algo in ['a2c', 'ppo', 'acktr', 'sf', "q", "a2csf"]
if args.recurrent_policy:
    assert args.algo in ['a2c', 'ppo', "sf", "q", "a2csf"], \
        'Recurrent policy is not implemented for ACKTR'

# Number of policy updates implied by the total frame budget.
num_updates = int(args.num_frames) // args.num_steps // args.num_processes

# Seed CPU (and GPU, when used) RNGs for reproducibility.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Create the log dir; if it already exists (makedirs raises OSError),
# remove stale monitor CSVs from a previous run instead.
try:
    os.makedirs(args.log_dir)
except OSError:
    files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))
    for f in files:
        os.remove(f)

# Same setup for the separate evaluation log dir.
eval_log_dir = args.log_dir + "_eval"
try:
    os.makedirs(eval_log_dir)
except OSError:
    files = glob.glob(os.path.join(eval_log_dir, '*.monitor.csv'))
    for f in files:
        os.remove(f)
def main():
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
"""
if args.vis:
from visdom import Visdom
viz = Visdom(port=args.port)
win = None
"""
feature_size = 2
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, args.add_timestep, device, False)
if args.algo == 'sf':
policy= SFPolicy(envs.observation_space.shape, envs.action_space, feature_size = 2,
base_kwargs={'recurrent': args.recurrent_policy})
policy.to(device)
elif args.algo == "q":
policy= QPolicy(envs.observation_space.shape, envs.action_space, feature_size = 2,
base_kwargs={'recurrent': args.recurrent_policy})
policy.to(device)
elif args.algo == "a2csf":
actor_critic = SFConditionedPolicy(envs.observation_space.shape, envs.action_space, feature_size = 2,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
else:
actor_critic = Policy(envs.observation_space.shape, envs.action_space, feature_size = 2,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, lr=args.lr,
eps=args.eps, alpha=args.alpha,
max_grad_norm=args.max_grad_norm,
feature_size = 2)
elif args.algo == 'a2csf':
agent = algo.A2C_SF(actor_critic, args.value_loss_coef,
args.entropy_coef, lr_psi=args.lr,
lr_policy = args.lr, lr_w = 1,
eps=args.eps, alpha=args.alpha,
max_grad_norm=args.max_grad_norm,
feature_size = 2, gamma=args.gamma)
elif args.algo == 'ppo':
agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, acktr=True)
elif args.algo == 'sf':
agent = algo.SF(policy, feature_size = feature_size,
phi_lr=3e-4, psi_lr=3e-4, eps=args.eps_explore)
elif args.algo == 'q':
agent = algo.QLearning(policy, feature_size = feature_size,
lr=args.lr, eps=args.eps_explore)
use_a2csf_storage = True if args.algo == "a2csf" else False
if args.algo == "sf" or args.algo == "q":
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
policy.recurrent_hidden_state_size, feature_dim = feature_size)
else:
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size, feature_dim = feature_size,
a2csf = use_a2csf_storage)
obs = envs.reset()
# create a dummy feature
dummy_feature = torch.zeros([args.num_processes, feature_size])
rollouts.features[0].copy_(dummy_feature)
rollouts.obs[0].copy_(obs)
rollouts.to(device)
episode_rewards = deque(maxlen=100)
start = time.time()
wandb.init(project = "tSF")
if args.algo == "sf":
for j in range(num_updates):
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
_, _, action, _ , recurrent_hidden_states = policy.act(
rollouts.obs[step],
rollouts.recurrent_hidden_states[step],
rollouts.masks[step],
rollouts.features[step])
# Obser reward and next obs
obs, reward, done, infos = envs.step(action)
# info is a tuple of dicts
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
feature = torch.tensor(np.stack(_feature, axis = 0)).to(device)
# FIXME: works only for environments with sparse rewards
for idx, eps_done in enumerate(done):
if eps_done:
episode_rewards.append(np.array(reward[idx]))
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
rollouts.insert(obs = obs,recurrent_hidden_states = recurrent_hidden_states,
action_log_probs = None, value_preds = None,
actions = action, rewards = reward, masks = masks,
feature = feature)
psi_loss, phi_loss, w_loss = agent.update(rollouts)
rollouts.after_update()
if j % args.save_interval == 0 and args.save_dir != "":
print('Saving model')
print()
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = policy
if args.cuda:
save_model = copy.deepcopy(policy).cpu()
save_model = [save_model, hasattr(envs.venv, 'ob_rms') and envs.venv.ob_rms or None]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and len(episode_rewards) > 1:
end = time.time()
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.2f}/{:.2f}, min/max reward {:.2f}/{:.2f}, success rate {:.2f}\n".
format(
j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards),
np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards)
)
)
wandb.log({"mean_reward": np.mean(episode_rewards),
"success_rate": np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards),
"num_updates": j,
"psi_loss": float(psi_loss),
"phi_loss": float(phi_loss),
"w_loss": float(w_loss)
}, step = total_num_steps)
if args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0:
eval_envs = make_vec_envs(args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
if eval_envs.venv.__class__.__name__ == "VecNormalize":
eval_envs.venv.ob_rms = envs.venv.ob_rms
# An ugly hack to remove updates
def _obfilt(self, obs):
if self.ob_rms:
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
eval_envs.venv._obfilt = types.MethodType(_obfilt, envs.venv)
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(args.num_processes, actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
# create a dummy feature
eval_features = torch.zeros([args.num_processes, feature_size])
while len(eval_episode_rewards) < 10:
with torch.no_grad():
_, action, _, _, eval_recurrent_hidden_states = policy.act( obs, eval_recurrent_hidden_states, eval_masks, eval_features, deterministic=True)
# Obser reward and next obs
obs, reward, done, infos = eval_envs.step(action)
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
eval_feature = np.stack(_feature, axis = 0)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards),
np.mean(eval_episode_rewards)
))
wandb.log({"mean_eval_reward": np.mean(eval_episode_rewards),
}, step = total_num_steps)
"""
if args.vis and j % args.vis_interval == 0:
try:
# Sometimes monitor doesn't properly flush the outputs
win = visdom_plot(viz, win, args.log_dir, args.env_name,
args.algo, args.num_frames)
except IOError:
pass
"""
envs.close()
elif args.algo == "q":
for j in range(num_updates):
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
_, action, _, recurrent_hidden_states = policy.act(
rollouts.obs[step],
rollouts.recurrent_hidden_states[step],
rollouts.masks[step],
rollouts.features[step])
# Obser reward and next obs
obs, reward, done, infos = envs.step(action)
# info is a tuple of dicts
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
feature = torch.tensor(np.stack(_feature, axis = 0)).to(device)
# FIXME: works only for environments with sparse rewards
for idx, eps_done in enumerate(done):
if eps_done:
episode_rewards.append(np.array(reward[idx]))
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
rollouts.insert(obs = obs,recurrent_hidden_states = recurrent_hidden_states,
action_log_probs = None, value_preds = None,
actions = action, rewards = reward, masks = masks,
feature = feature)
q_loss = agent.update(rollouts)
rollouts.after_update()
if j % args.save_interval == 0 and args.save_dir != "":
print('Saving model')
print()
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = policy
if args.cuda:
save_model = copy.deepcopy(policy).cpu()
save_model = [save_model, hasattr(envs.venv, 'ob_rms') and envs.venv.ob_rms or None]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and len(episode_rewards) > 1:
end = time.time()
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.2f}/{:.2f}, min/max reward {:.2f}/{:.2f}, success rate {:.2f}\n".
format(
j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards),
np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards)
)
)
wandb.log({"mean_reward": np.mean(episode_rewards),
"success_rate": np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards),
"num_updates": j,
"q_loss": float(q_loss),
}, step = total_num_steps)
if args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0:
eval_envs = make_vec_envs(args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
if eval_envs.venv.__class__.__name__ == "VecNormalize":
eval_envs.venv.ob_rms = envs.venv.ob_rms
# An ugly hack to remove updates
def _obfilt(self, obs):
if self.ob_rms:
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
eval_envs.venv._obfilt = types.MethodType(_obfilt, envs.venv)
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(args.num_processes, actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
# create a dummy feature
eval_features = torch.zeros([args.num_processes, feature_size])
while len(eval_episode_rewards) < 10:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = policy.act( obs, eval_recurrent_hidden_states, eval_masks, eval_features, deterministic=True)
# Obser reward and next obs
obs, reward, done, infos = eval_envs.step(action)
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
eval_feature = np.stack(_feature, axis = 0)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards),
np.mean(eval_episode_rewards)
))
wandb.log({"mean_eval_reward": np.mean(eval_episode_rewards),
}, step = total_num_steps)
"""
if args.vis and j % args.vis_interval == 0:
try:
# Sometimes monitor doesn't properly flush the outputs
win = visdom_plot(viz, win, args.log_dir, args.env_name,
args.algo, args.num_frames)
except IOError:
pass
"""
envs.close()
elif args.algo == "a2csf":
for j in range(num_updates):
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states, psi = actor_critic.act(
rollouts.obs[step],
rollouts.recurrent_hidden_states[step],
rollouts.masks[step],
rollouts.features[step])
# Obser reward and next obs
obs, reward, done, infos = envs.step(action)
# info is a tuple of dicts
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
feature = torch.FloatTensor(np.stack(_feature, axis = 0)).to(device)
estimated_reward = actor_critic.evaluate_rewards(feature)
"""
for info in infos:
if 'episode' in info.keys():
print(reward)
episode_rewards.append(info['episode']['r'])
"""
# FIXME: works only for environments with sparse rewards
for idx, eps_done in enumerate(done):
if eps_done:
episode_rewards.append(np.array(reward[idx]))
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, feature, psi, estimated_reward)
with torch.no_grad():
next_value, next_psi = actor_critic.get_value(rollouts.obs[-1],
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1],
rollouts.features[-1])
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau, sf = True)
rollouts.compute_psi_returns(next_psi,args.gamma)
value_loss, action_loss, dist_entropy, psi_loss, w_loss = agent.update(rollouts)
rollouts.after_update()
if j % args.save_interval == 0 and args.save_dir != "":
print('Saving model')
print()
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
save_model = [save_model, hasattr(envs.venv, 'ob_rms') and envs.venv.ob_rms or None]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and len(episode_rewards) > 1:
end = time.time()
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.2f}/{:.2f}, min/max reward {:.2f}/{:.2f}, success rate {:.2f}\n".
format(
j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards),
np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards)
)
)
wandb.log({"mean_reward": np.mean(episode_rewards),
"success_rate": np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards),
"num_updates": j,
"value_loss": float(value_loss),
"action_loss": float(action_loss),
"dist_entropy": float(dist_entropy),
"psi_loss": float(psi_loss),
"w_loss": float(w_loss)
}, step = total_num_steps)
if args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0:
eval_envs = make_vec_envs(args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
if eval_envs.venv.__class__.__name__ == "VecNormalize":
eval_envs.venv.ob_rms = envs.venv.ob_rms
# An ugly hack to remove updates
def _obfilt(self, obs):
if self.ob_rms:
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
eval_envs.venv._obfilt = types.MethodType(_obfilt, envs.venv)
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(args.num_processes,
actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
# create a dummy feature
eval_features = torch.zeros([args.num_processes, feature_size])
while len(eval_episode_rewards) < 10:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs, eval_recurrent_hidden_states, eval_masks, eval_features, deterministic=True)
# Obser reward and next obs
obs, reward, done, infos = eval_envs.step(action)
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
eval_feature = np.stack(_feature, axis = 0)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards),
np.mean(eval_episode_rewards)
))
wandb.log({"mean_eval_reward": np.mean(eval_episode_rewards),
}, step = total_num_steps)
"""
if args.vis and j % args.vis_interval == 0:
try:
# Sometimes monitor doesn't properly flush the outputs
win = visdom_plot(viz, win, args.log_dir, args.env_name,
args.algo, args.num_frames)
except IOError:
pass
"""
envs.close()
else:
for j in range(num_updates):
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step],
rollouts.recurrent_hidden_states[step],
rollouts.masks[step],
rollouts.features[step])
# Obser reward and next obs
if j > 5:
env_mask = np.array([0, 1, 0, 0])
else:
env_mask = np.array([0, 0, 0, 0])
obs, reward, done, infos = envs.step(action, env_mask)
print(obs[:,0,0,0])
# info is a tuple of dicts
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
feature = torch.tensor(np.stack(_feature, axis = 0)).to(device)
"""
for info in infos:
if 'episode' in info.keys():
print(reward)
episode_rewards.append(info['episode']['r'])
"""
# FIXME: works only for environments with sparse rewards
for idx, eps_done in enumerate(done):
if eps_done:
episode_rewards.append(np.array(reward[idx]))
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, feature, psi = None, estimated_reward = None)
with torch.no_grad():
next_value = actor_critic.get_value(rollouts.obs[-1],
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1],
rollouts.features[-1]).detach()
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
if j % args.save_interval == 0 and args.save_dir != "":
print('Saving model')
print()
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
save_model = [save_model, hasattr(envs.venv, 'ob_rms') and envs.venv.ob_rms or None]
torch.save(save_model, os.path.join(save_path, args.env_name + ".pt"))
total_num_steps = (j + 1) * args.num_processes * args.num_steps
if j % args.log_interval == 0 and len(episode_rewards) > 1:
end = time.time()
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.2f}/{:.2f}, min/max reward {:.2f}/{:.2f}, success rate {:.2f}\n".
format(
j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards),
np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards)
)
)
wandb.log({"mean_reward": np.mean(episode_rewards),
"success_rate": np.count_nonzero(np.greater(episode_rewards, 0)) / len(episode_rewards),
"num_updates": j,
"value_loss": float(value_loss),
"action_loss": float(action_loss),
"dist_entropy": float(dist_entropy)
}, step = total_num_steps)
if args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0:
eval_envs = make_vec_envs(args.env_name, args.seed + args.num_processes, args.num_processes,
args.gamma, eval_log_dir, args.add_timestep, device, True)
if eval_envs.venv.__class__.__name__ == "VecNormalize":
eval_envs.venv.ob_rms = envs.venv.ob_rms
# An ugly hack to remove updates
def _obfilt(self, obs):
if self.ob_rms:
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
eval_envs.venv._obfilt = types.MethodType(_obfilt, envs.venv)
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(args.num_processes,
actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(args.num_processes, 1, device=device)
# create a dummy feature
eval_features = torch.zeros([args.num_processes, feature_size])
while len(eval_episode_rewards) < 10:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs, eval_recurrent_hidden_states, eval_masks, eval_features, deterministic=True)
# Obser reward and next obs
obs, reward, done, infos = eval_envs.step(action)
_feature = []
for info in infos:
if "feature" in info.keys():
_feature.append(info["feature"])
eval_feature = np.stack(_feature, axis = 0)
eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards),
np.mean(eval_episode_rewards)
))
wandb.log({"mean_eval_reward": np.mean(eval_episode_rewards),
}, step = total_num_steps)
"""
if args.vis and j % args.vis_interval == 0:
try:
# Sometimes monitor doesn't properly flush the outputs
win = visdom_plot(viz, win, args.log_dir, args.env_name,
args.algo, args.num_frames)
except IOError:
pass
"""
envs.close()
if __name__ == "__main__":
    # Script entry point: delegate to main() so importing this module
    # does not start training.
    main()
| 43.909449
| 177
| 0.50937
| 3,623
| 33,459
| 4.477505
| 0.07342
| 0.068179
| 0.028603
| 0.012822
| 0.899642
| 0.889841
| 0.874861
| 0.867341
| 0.861793
| 0.861793
| 0
| 0.008228
| 0.397053
| 33,459
| 761
| 178
| 43.967148
| 0.795876
| 0.036343
| 0
| 0.774131
| 0
| 0.007722
| 0.05407
| 0
| 0
| 0
| 0
| 0.005256
| 0.003861
| 1
| 0.009653
| false
| 0.007722
| 0.034749
| 0
| 0.059846
| 0.032819
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea677fe15da31187b7c8b2180789afb81150f266
| 717
|
py
|
Python
|
src/sage/calculus/predefined.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/calculus/predefined.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/calculus/predefined.py
|
dimpase/sage
|
468f23815ade42a2192b0a9cd378de8fdc594dcd
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
from sage.symbolic.ring import var as _var
# Predefine one symbolic variable per ASCII letter so each name is
# available without an explicit var(...) call.  Kept as explicit
# assignments so every name is statically visible to tooling and to
# `from ... import x`.
# Omitted on purpose: lowercase e/i and uppercase I/O — presumably
# because Sage reserves those for the constants e and i/I and for
# big-O notation; confirm against sage.symbolic.constants.
a = _var('a')
b = _var('b')
c = _var('c')
d = _var('d')
f = _var('f')
g = _var('g')
h = _var('h')
j = _var('j')
k = _var('k')
l = _var('l')
m = _var('m')
n = _var('n')
o = _var('o')
p = _var('p')
q = _var('q')
r = _var('r')
s = _var('s')
t = _var('t')
u = _var('u')
v = _var('v')
w = _var('w')
x = _var('x')
y = _var('y')
z = _var('z')
A = _var('A')
B = _var('B')
C = _var('C')
D = _var('D')
E = _var('E')
F = _var('F')
G = _var('G')
H = _var('H')
J = _var('J')
K = _var('K')
L = _var('L')
M = _var('M')
N = _var('N')
P = _var('P')
Q = _var('Q')
R = _var('R')
S = _var('S')
T = _var('T')
U = _var('U')
V = _var('V')
W = _var('W')
X = _var('X')
Y = _var('Y')
Z = _var('Z')
| 13.788462
| 42
| 0.450488
| 152
| 717
| 1.802632
| 0.210526
| 0.043796
| 0.036496
| 0.043796
| 0.839416
| 0.839416
| 0.839416
| 0.839416
| 0.839416
| 0.839416
| 0
| 0
| 0.211994
| 717
| 51
| 43
| 14.058824
| 0.484956
| 0
| 0
| 0
| 0
| 0
| 0.067039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020408
| 0
| 0.020408
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ea7ae2e09c13dc99361ba5bbe01ecf69efd9b4d8
| 9,427
|
py
|
Python
|
tests/test_tb.py
|
StanfordAHA/Lake
|
34df001db107e1a0824b7fdb05b9f2145bf49a3e
|
[
"BSD-3-Clause"
] | 11
|
2019-10-14T02:05:38.000Z
|
2022-03-10T14:10:22.000Z
|
tests/test_tb.py
|
StanfordAHA/Lake
|
34df001db107e1a0824b7fdb05b9f2145bf49a3e
|
[
"BSD-3-Clause"
] | 29
|
2019-09-02T05:49:40.000Z
|
2022-02-26T00:57:54.000Z
|
tests/test_tb.py
|
StanfordAHA/Lake
|
34df001db107e1a0824b7fdb05b9f2145bf49a3e
|
[
"BSD-3-Clause"
] | 1
|
2021-04-16T20:26:13.000Z
|
2021-04-16T20:26:13.000Z
|
from lake.models.tb_model import TBModel
from lake.modules.transpose_buffer import TransposeBuffer
import magma as m
from magma import *
import fault
import tempfile
import kratos as k
import random as rand
import pytest
@pytest.mark.parametrize("start_addr", [0, 1])
def test_tb(start_addr,
            word_width=16,
            fetch_width=4,
            num_tb=1,
            max_tb_height=1,
            max_range=5,
            max_range_inner=5,
            max_stride=15,
            tb_iterator_support=2):
    """Randomized co-simulation of the TransposeBuffer RTL against the
    TBModel golden model, dimensionality-2 configuration, for both
    starting addresses.
    """
    # Reference model built and configured identically to the DUT below.
    model_tb = TBModel(word_width,
                       fetch_width,
                       num_tb,
                       max_tb_height,
                       max_range,
                       max_range_inner)
    new_config = {}
    new_config["range_outer"] = 5
    new_config["range_inner"] = 3
    new_config["stride"] = 2
    new_config["indices"] = [0, 1, 2]
    new_config["tb_height"] = 1
    new_config["dimensionality"] = 2
    new_config["starting_addr"] = start_addr
    model_tb.set_config(new_config=new_config)
    dut = TransposeBuffer(word_width,
                          fetch_width,
                          num_tb,
                          max_tb_height,
                          max_range,
                          max_range_inner,
                          max_stride,
                          tb_iterator_support)
    magma_dut = k.util.to_magma(dut, flatten_array=True, check_flip_flop_always_ff=False)
    tester = fault.Tester(magma_dut, magma_dut.clk)
    # Reset sequence: pulse rst_n low for one cycle.
    tester.circuit.clk = 0
    tester.circuit.rst_n = 1
    tester.step(2)
    tester.circuit.rst_n = 0
    tester.step(2)
    tester.circuit.rst_n = 1
    # configuration registers (must mirror new_config above)
    tester.circuit.indices_0 = 0
    tester.circuit.indices_1 = 1
    tester.circuit.indices_2 = 2
    tester.circuit.range_outer = 5
    tester.circuit.range_inner = 3
    tester.circuit.stride = 2
    tester.circuit.tb_height = 1
    tester.circuit.dimensionality = 2
    tester.circuit.starting_addr = start_addr
    rand.seed(0)  # deterministic stimulus
    num_iters = 300
    for i in range(num_iters):
        data = []
        for j in range(fetch_width):
            data.append(rand.randint(0, 2**word_width - 1))
        for j in range(fetch_width):
            setattr(tester.circuit, f"input_data_{j}", data[j])
        if i % fetch_width == 0:
            valid_data = 1
        else:
            valid_data = 0
        # NOTE(review): the periodic valid pattern above is immediately
        # overwritten by the random draw below, so it is dead code.
        valid_data = rand.randint(0, 1)
        tester.circuit.valid_data = valid_data
        input_data = data
        mem_valid_data = rand.randint(0, 1)
        tester.circuit.mem_valid_data = mem_valid_data
        ack_in = valid_data
        tester.circuit.ack_in = ack_in
        ren = 1
        tester.circuit.ren = ren
        # Drive the golden model with the same inputs and compare outputs.
        model_data, model_valid, model_rdy_to_arbiter = \
            model_tb.interact(input_data, valid_data, ack_in, ren, mem_valid_data)
        # print("i: ", i, " model valid ", model_valid, " model data ", model_data)
        tester.eval()
        tester.circuit.output_valid.expect(model_valid)
        if model_valid:
            tester.circuit.col_pixels.expect(model_data[0])
        tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        tester.compile_and_run(target="verilator",
                               directory=tempdir,
                               magma_output="verilog",
                               flags=["-Wno-fatal"])
def test_id(word_width=16,
            fetch_width=4,
            num_tb=1,
            max_tb_height=1,
            max_range=12,
            max_range_inner=5,
            max_stride=15,
            tb_iterator_support=2):
    """Randomized co-simulation of TransposeBuffer vs. TBModel in the
    dimensionality-1 (identity/linear addressing) configuration.
    """
    # Reference model built and configured identically to the DUT below.
    model_tb = TBModel(word_width,
                       fetch_width,
                       num_tb,
                       max_tb_height,
                       max_range,
                       max_range_inner)
    new_config = {}
    new_config["range_outer"] = 12
    new_config["range_inner"] = 3
    new_config["stride"] = 1
    new_config["indices"] = [0, 1, 2]
    new_config["tb_height"] = 1
    new_config["dimensionality"] = 1
    new_config["starting_addr"] = 0
    model_tb.set_config(new_config=new_config)
    dut = TransposeBuffer(word_width,
                          fetch_width,
                          num_tb,
                          max_tb_height,
                          max_range,
                          max_range_inner,
                          max_stride,
                          tb_iterator_support)
    magma_dut = k.util.to_magma(dut, flatten_array=True)
    tester = fault.Tester(magma_dut, magma_dut.clk)
    # Reset sequence: pulse rst_n low for one cycle.
    tester.circuit.clk = 0
    tester.circuit.rst_n = 1
    tester.step(2)
    tester.circuit.rst_n = 0
    tester.step(2)
    tester.circuit.rst_n = 1
    # configuration registers
    # dimensionality = 1 version
    tester.circuit.indices_0 = 0
    tester.circuit.indices_1 = 1
    tester.circuit.indices_2 = 2
    tester.circuit.range_outer = 12
    tester.circuit.range_inner = 3
    tester.circuit.stride = 1
    tester.circuit.tb_height = 1
    tester.circuit.dimensionality = 1
    tester.circuit.starting_addr = 0
    rand.seed(0)  # deterministic stimulus
    num_iters = 300
    for i in range(num_iters):
        # print()
        # print("i: ", i)
        data = []
        for j in range(fetch_width):
            data.append(rand.randint(0, 2**word_width - 1))
        for j in range(fetch_width):
            setattr(tester.circuit, f"input_data_{j}", data[j])
        valid_data = rand.randint(0, 1)
        tester.circuit.valid_data = valid_data
        input_data = data
        mem_valid_data = rand.randint(0, 1)
        tester.circuit.mem_valid_data = mem_valid_data
        ack_in = valid_data
        tester.circuit.ack_in = ack_in
        ren = 1
        tester.circuit.ren = ren
        # Drive the golden model with the same inputs and compare outputs.
        model_data, model_valid, model_rdy_to_arbiter = \
            model_tb.interact(input_data, valid_data, ack_in, ren, mem_valid_data)
        tester.eval()
        tester.circuit.output_valid.expect(model_valid)
        if model_valid:
            tester.circuit.col_pixels.expect(model_data[0])
        # print("model data ", model_data, " model_valid ", model_valid)
        tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        tester.compile_and_run(target="verilator",
                               directory=tempdir,
                               magma_output="verilog",
                               flags=["-Wno-fatal"])
def test_fw1(word_width=16,
             fetch_width=1,
             num_tb=1,
             max_tb_height=1,
             max_range=5,
             max_range_inner=5,
             max_stride=15,
             tb_iterator_support=2):
    """Randomized co-simulation of TransposeBuffer vs. TBModel with
    fetch_width=1 (scalar input port instead of a flattened array).
    """
    # Reference model built and configured identically to the DUT below.
    model_tb = TBModel(word_width,
                       fetch_width,
                       num_tb,
                       max_tb_height,
                       max_range,
                       max_range_inner)
    new_config = {}
    new_config["range_outer"] = 5
    new_config["range_inner"] = 3
    new_config["stride"] = 1
    new_config["indices"] = [0, 1, 2]
    new_config["tb_height"] = 1
    new_config["dimensionality"] = 1
    new_config["starting_addr"] = 0
    model_tb.set_config(new_config=new_config)
    dut = TransposeBuffer(word_width,
                          fetch_width,
                          num_tb,
                          max_tb_height,
                          max_range,
                          max_range_inner,
                          max_stride,
                          tb_iterator_support)
    magma_dut = k.util.to_magma(dut, flatten_array=True)
    tester = fault.Tester(magma_dut, magma_dut.clk)
    # Reset sequence: pulse rst_n low for one cycle.
    tester.circuit.clk = 0
    tester.circuit.rst_n = 1
    tester.step(2)
    tester.circuit.rst_n = 0
    tester.step(2)
    tester.circuit.rst_n = 1
    # configuration registers
    tester.circuit.indices_0 = 0
    tester.circuit.indices_1 = 1
    tester.circuit.indices_2 = 2
    tester.circuit.range_outer = 5
    tester.circuit.range_inner = 3
    tester.circuit.stride = 1
    tester.circuit.tb_height = 1
    tester.circuit.dimensionality = 1
    tester.circuit.starting_addr = 0
    rand.seed(0)  # deterministic stimulus
    data = 0
    num_iters = 300
    for i in range(num_iters):
        # print()
        # print("i: ", i)
        # fetch_width == 1: a single scalar input rather than a list.
        data = rand.randint(0, 2**word_width - 1)
        tester.circuit.input_data = data
        valid_data = rand.randint(0, 1)
        tester.circuit.valid_data = valid_data
        input_data = data
        mem_valid_data = rand.randint(0, 1)
        tester.circuit.mem_valid_data = mem_valid_data
        ack_in = valid_data
        tester.circuit.ack_in = ack_in
        ren = 1
        tester.circuit.ren = ren
        # Drive the golden model with the same inputs and compare outputs.
        model_data, model_valid, model_rdy_to_arbiter = \
            model_tb.interact(input_data, valid_data, ack_in, ren, mem_valid_data)
        # print("i: ", i, " model valid ", model_valid, " model data ", model_data)
        tester.eval()
        tester.circuit.output_valid.expect(model_valid)
        if model_valid:
            tester.circuit.col_pixels.expect(model_data[0])
        tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        tester.compile_and_run(target="verilator",
                               directory=tempdir,
                               magma_output="verilog",
                               flags=["-Wno-fatal"])
if __name__ == "__main__":
    # BUG FIX: test_tb takes start_addr as a positional parameter with no
    # default (it is normally supplied by pytest.mark.parametrize), so the
    # original bare test_tb() call raised TypeError when run as a script.
    test_tb(0)
    # test_id()
    # test_fw1()
| 28.224551
| 89
| 0.56667
| 1,155
| 9,427
| 4.333333
| 0.103896
| 0.155844
| 0.055944
| 0.030569
| 0.904496
| 0.895505
| 0.895505
| 0.890909
| 0.881119
| 0.881119
| 0
| 0.026307
| 0.342739
| 9,427
| 333
| 90
| 28.309309
| 0.781472
| 0.040098
| 0
| 0.861789
| 0
| 0
| 0.037299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012195
| false
| 0
| 0.036585
| 0
| 0.04878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57a02f1dbe87707a47303254c24ac1d2ddf4a927
| 4,989
|
py
|
Python
|
wrappers/SONATAClient/sonpackage.py
|
CN-UPB/python-mano-wrappers
|
8e3607feaa97bc3e2c906ee8e4b25b21853ea6cf
|
[
"Apache-2.0"
] | null | null | null |
wrappers/SONATAClient/sonpackage.py
|
CN-UPB/python-mano-wrappers
|
8e3607feaa97bc3e2c906ee8e4b25b21853ea6cf
|
[
"Apache-2.0"
] | null | null | null |
wrappers/SONATAClient/sonpackage.py
|
CN-UPB/python-mano-wrappers
|
8e3607feaa97bc3e2c906ee8e4b25b21853ea6cf
|
[
"Apache-2.0"
] | null | null | null |
from ..CommonInterface import CommonInterfaceSonPackage
# from .helpers import Helpers
import json
import requests
class Package(CommonInterfaceSonPackage):
    """Client for the SONATA catalogue's ``son-packages`` REST endpoints.

    Every public method returns a JSON string ``{"error": bool, "data": str}``
    on an HTTP round trip, but the raw dict on a transport exception.
    NOTE(review): that dict-vs-string asymmetry is preserved from the
    original implementation because callers may depend on it — confirm
    before unifying.
    """

    def __init__(self, host, port=4002):
        self._host = host
        self._port = port
        self._base_path = 'http://{0}:{1}'
        self._user_endpoint = '{0}'

    def _resolve_base(self, host, port):
        # Fall back to the host/port supplied at construction time.
        if host is None:
            return self._base_path.format(self._host, self._port)
        return self._base_path.format(host, port)

    def get_son_packages(self, token, _filter=None, host=None, port=None):
        """List son-packages, optionally filtered by ``_admin.type``."""
        base_path = self._resolve_base(host, port)
        query_path = ''
        if _filter:
            query_path = '?_admin.type=' + _filter
        _endpoint = "{0}/catalogues/api/v2/son-packages{1}".format(base_path, query_path)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
        try:
            r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return result
        if r.status_code == requests.codes.ok:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def post_son_packages(self, token, package_path, host=None, port=None):
        """Upload the package file at ``package_path`` to the catalogue."""
        base_path = self._resolve_base(host, port)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/x-www-form-urlencoded",
                   "Content-Disposition": "attachment; filename=sonata_example.son",
                   'Authorization': 'Bearer {}'.format(token)}
        _endpoint = "{0}/catalogues/api/v2/son-packages".format(base_path)
        try:
            # 'with' closes the file handle; the original leaked it.
            with open(package_path, 'rb') as payload:
                r = requests.post(_endpoint, data=payload, verify=False, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return result
        if r.status_code == requests.codes.created:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def delete_son_packages_PackageId(self, token, id, host=None, port=None):
        """Delete the package identified by ``id``."""
        base_path = self._resolve_base(host, port)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token)}
        _endpoint = "{0}/catalogues/api/v2/son-packages/{1}".format(base_path, id)
        try:
            r = requests.delete(_endpoint, params=None, verify=False, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return result
        if r.status_code == requests.codes.no_content:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def put_son_packages_PackageId(self, token, data_path, id, host=None, port=None):
        """Replace the package identified by ``id`` with the file at ``data_path``."""
        base_path = self._resolve_base(host, port)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
                   'Authorization': 'Bearer {}'.format(token)}
        _endpoint = "{0}/catalogues/api/v2/son-packages/{1}".format(base_path, id)
        try:
            # BUG FIX: this method previously issued requests.delete and
            # ignored data_path entirely (copy-paste from the delete method);
            # a PUT carrying the package payload is what the endpoint expects.
            with open(data_path, 'rb') as payload:
                r = requests.put(_endpoint, data=payload, verify=False, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return result
        # NOTE(review): accept both 200 and 204 — the original checked only
        # no_content; confirm which code the catalogue actually returns.
        if r.status_code in (requests.codes.ok, requests.codes.no_content):
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)

    def get_son_packages_PackageId(self, token, id, host=None, port=None):
        """Fetch the package identified by ``id``."""
        base_path = self._resolve_base(host, port)
        # BUG FIX: the format string was missing the '/' before the id
        # (compare the delete/put endpoints above).
        _endpoint = "{0}/catalogues/api/v2/son-packages/{1}".format(base_path, id)
        result = {'error': True, 'data': ''}
        headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
        try:
            r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
        except Exception as e:
            result['data'] = str(e)
            return result
        if r.status_code == requests.codes.ok:
            result['error'] = False
            result['data'] = r.text
        return json.dumps(result)
| 39.595238
| 103
| 0.563039
| 569
| 4,989
| 4.782074
| 0.149385
| 0.064682
| 0.030871
| 0.035281
| 0.823962
| 0.808159
| 0.808159
| 0.808159
| 0.808159
| 0.808159
| 0
| 0.008298
| 0.299459
| 4,989
| 126
| 104
| 39.595238
| 0.770243
| 0.005612
| 0
| 0.762376
| 0
| 0
| 0.156153
| 0.050465
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059406
| false
| 0
| 0.029703
| 0
| 0.19802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57a92b93a24868c2458024584c4ba00bb4e9648b
| 9,796
|
py
|
Python
|
main_flask.py
|
akahard2dj/CandleMap
|
84f559ea3c049a446bc884f9492c44516c4e12fd
|
[
"MIT"
] | null | null | null |
main_flask.py
|
akahard2dj/CandleMap
|
84f559ea3c049a446bc884f9492c44516c4e12fd
|
[
"MIT"
] | null | null | null |
main_flask.py
|
akahard2dj/CandleMap
|
84f559ea3c049a446bc884f9492c44516c4e12fd
|
[
"MIT"
] | null | null | null |
from flask import Flask, abort, request
from profanity import profanity
import json
from SQLiteDB.candle_location_db import CandleLocation
from SQLiteDB.candle_board_db import CandleBoard
from SQLiteDB.candle_count_db import CandleCount
from SQLiteDB.api_key_db import APIKey
from util.verification import VerificationText
app = Flask(__name__)

# Module-level singletons shared by all request handlers.
candle_location_db = CandleLocation()
candle_board_db = CandleBoard()
candle_count_db = CandleCount()
api_key_db = APIKey()
verify_text = VerificationText()

# Load the banned-word list once at startup.  BUG FIX: the original opened
# the file without ever closing it; 'with' releases the handle.
with open('banned_word_list.txt', 'r', encoding='utf-8') as f:
    banned_words = [line.rstrip('\n') for line in f]

profanity.load_words(banned_words)
profanity.set_censor_characters('-')
@app.route('/')
def main_page():
    """Root endpoint; returns a static placeholder string."""
    return 'test'
@app.route('/api/v2/candle_count', methods=['GET', 'POST'])
def candle_count_api():
    """v2 candle-count endpoint; requires a valid ``apikey`` query parameter.

    GET returns the current candle count; POST applies the JSON body as an
    update and returns the new count.  Always responds with a JSON string of
    the form ``{"count": <int>}`` (``{"count": []}`` for a bad API key).
    """
    # Only API keys previously issued via api_key_db are accepted.
    api_key = request.args.get("apikey")
    api_key_db.connect()
    is_available = api_key_db.is_issued_key(api_key)
    res_dict = dict()
    if is_available:
        if request.method == 'GET':
            status = candle_count_db.connect()
            if status['connection_status'] == 'failed':
                res_dict['result'] = 'fail'
                res_dict['result_detail'] = 'sqlite3 connection error'
                # Serialize the error payload too: the original returned a raw
                # dict here while every success path returns a JSON string.
                return json.dumps(res_dict)
            count = candle_count_db.get_candle_count()
            if not count:
                # Empty table (or falsy fetch result) reads as zero candles.
                count = 0
            res_dict['count'] = count
        if request.method == 'POST':
            status = candle_count_db.connect()
            if status['connection_status'] == 'failed':
                return json.dumps(res_dict)
            json_data = request.get_json()
            candle_count_db.db_update(json_data)
            count = candle_count_db.get_candle_count()
            res_dict['count'] = count
    else:
        # Unknown/missing API key: empty payload rather than an error status.
        res_dict['count'] = []
    return json.dumps(res_dict)
@app.route('/api/v2/candle_board', methods=['GET', 'POST', 'DELETE'])
def candle_board_api():
    """v2 message-board endpoint; requires a valid ``apikey`` query parameter.

    POST stores a new post (HTML stripped), GET pages through posts, DELETE
    removes a post.  Every branch responds with the current page of posts
    selected by the ``offset``/``limit`` query parameters, serialized as a
    JSON string (``{"data": []}`` for a bad API key).
    """
    method_flag = {'POST': 1, 'DELETE': 2, 'GET': 3}
    api_key = request.args.get("apikey")
    api_key_db.connect()
    is_available = api_key_db.is_issued_key(api_key)
    res_dict = dict()
    if is_available:
        if request.method == 'POST':
            # TODO: apply the banned-word (profanity) filter here as v1 does.
            status = candle_board_db.connect()
            if status['connection_status'] == 'failed':
                res_dict['result_msg'] = 'failed'
                res_dict['result_detail'] = 'sqlite3 connection error'
                # Serialize the error payload; the original returned a raw dict.
                return json.dumps(res_dict, ensure_ascii=False)
            json_data = request.get_json()
            text = json_data['content']
            # Strip HTML from user-supplied content before persisting it.
            text_verify_html = verify_text.html_remove(text)
            db_to_json = {'content': text_verify_html}
            candle_board_db.db_update(method_flag['POST'], db_to_json)
            offset = request.args.get("offset")
            limit = request.args.get("limit")
            res_dict['data'] = candle_board_db.fetch_posted_step(offset, limit)
        elif request.method == 'GET':
            status = candle_board_db.connect()
            if status['connection_status'] == 'failed':
                res_dict['result_msg'] = 'failed'
                res_dict['result_detail'] = 'sqlite3 connection error'
                return json.dumps(res_dict, ensure_ascii=False)
            offset = request.args.get("offset")
            limit = request.args.get("limit")
            res_dict['data'] = candle_board_db.fetch_posted_step(offset, limit)
        elif request.method == 'DELETE':
            status = candle_board_db.connect()
            if status['connection_status'] == 'failed':
                res_dict['result_msg'] = 'failed'
                res_dict['result_detail'] = 'sqlite3 connection error'
                # Bug fix: the original fell through and kept using the DB
                # after a failed connection; bail out like POST/GET do.
                return json.dumps(res_dict, ensure_ascii=False)
            json_data = request.get_json()
            offset = request.args.get("offset")
            limit = request.args.get("limit")
            candle_board_db.db_update(method_flag['DELETE'], json_data)
            res_dict['data'] = candle_board_db.fetch_posted_step(offset, limit)
        else:
            res_dict['result_msg'] = 'success'
            res_dict['result_detail'] = 'invalid connection'
    else:
        res_dict['data'] = []
    # ensure_ascii=False keeps non-ASCII (Korean) post content readable.
    return json.dumps(res_dict, ensure_ascii=False)
@app.route('/api/v1/candle_count/', methods=['GET', 'POST'])
def candle_count():
    """v1 candle-count endpoint (no API key required).

    GET returns the stored candle count; POST applies the JSON body as an
    update and returns the new count.  Responds with a JSON string
    ``{"count": <int>}``.
    """
    res_dict = {}
    if request.method == 'GET':
        status = candle_count_db.connect()
        if status['connection_status'] == 'failed':
            res_dict['result'] = 'fail'
            res_dict['result_detail'] = 'sqlite3 connection error'
            # Serialize the error payload too: the original returned a raw
            # dict here while the success path returns a JSON string.
            return json.dumps(res_dict)
        count = candle_count_db.get_candle_count()
        if not count:
            # Empty table (or falsy fetch result) reads as zero candles.
            count = 0
        res_dict['count'] = count
    if request.method == 'POST':
        status = candle_count_db.connect()
        if status['connection_status'] == 'failed':
            return json.dumps(res_dict)
        json_data = request.get_json()
        candle_count_db.db_update(json_data)
        count = candle_count_db.get_candle_count()
        res_dict['count'] = count
    return json.dumps(res_dict)
@app.route('/api/v1/candle_location/', methods=['POST'])
def candle_location():
    """Record a candle location from the POSTed JSON and report its flag."""
    res_dict = {}
    if request.method != 'POST':
        # Route only registers POST, so this branch is effectively dead.
        res_dict["result_msg"] = "success"
        res_dict["result_detail"] = "invalid access"
        return json.dumps(res_dict)
    json_data = request.get_json()
    if not json_data:
        # Missing/empty body: report failure without touching the DB.
        res_dict["result_msg"] = "fail"
        return json.dumps(res_dict)
    candle_location_db.connect()
    candle_location_db.db_update(json_data)
    flag = candle_location_db.get_candle_flag(json_data)
    res_dict["result_msg"] = "success"
    res_dict["candle_flag"] = flag
    return json.dumps(res_dict)
@app.route('/api/v1/candle_board', methods=['GET', 'POST', 'DELETE'])
def candle_board():
    """v1 message-board endpoint (no API key required).

    POST strips HTML, censors banned words, and stores the post; GET pages
    through posts; DELETE removes a post.  Every branch responds with the
    current page of posts selected by ``offset``/``limit``, serialized as a
    JSON string.
    """
    res_dict = dict()
    method_flag = {'POST': 1, 'DELETE': 2, 'GET': 3}
    if request.method == 'POST':
        status = candle_board_db.connect()
        if status['connection_status'] == 'failed':
            res_dict['result_msg'] = 'failed'
            res_dict['result_detail'] = 'sqlite3 connection error'
            # Serialize the error payload; the original returned a raw dict.
            return json.dumps(res_dict, ensure_ascii=False)
        json_data = request.get_json()
        text = json_data['content']
        # Strip HTML, then censor banned words, before persisting.
        text_verify_html = verify_text.html_remove(text)
        profanity_check = profanity.censor(text_verify_html)
        db_to_json = {'content': profanity_check}
        candle_board_db.db_update(method_flag['POST'], db_to_json)
        offset = request.args.get("offset")
        limit = request.args.get("limit")
        res_dict['data'] = candle_board_db.fetch_posted_step(offset, limit)
    elif request.method == 'GET':
        status = candle_board_db.connect()
        if status['connection_status'] == 'failed':
            res_dict['result_msg'] = 'failed'
            res_dict['result_detail'] = 'sqlite3 connection error'
            return json.dumps(res_dict, ensure_ascii=False)
        offset = request.args.get("offset")
        limit = request.args.get("limit")
        res_dict['data'] = candle_board_db.fetch_posted_step(offset, limit)
    elif request.method == 'DELETE':
        status = candle_board_db.connect()
        if status['connection_status'] == 'failed':
            res_dict['result_msg'] = 'failed'
            res_dict['result_detail'] = 'sqlite3 connection error'
            # Bug fix: the original fell through and kept using the DB after
            # a failed connection; bail out like POST/GET do.
            return json.dumps(res_dict, ensure_ascii=False)
        json_data = request.get_json()
        offset = request.args.get("offset")
        limit = request.args.get("limit")
        candle_board_db.db_update(method_flag['DELETE'], json_data)
        res_dict['data'] = candle_board_db.fetch_posted_step(offset, limit)
    else:
        res_dict['result_msg'] = 'success'
        res_dict['result_detail'] = 'invalid connection'
    # ensure_ascii=False keeps non-ASCII (Korean) post content readable.
    return json.dumps(res_dict, ensure_ascii=False)
if __name__ == '__main__':
    # Bind to all interfaces so the API is reachable from other hosts.
    app.run(host='0.0.0.0')
    #app.run(debug=True)
| 34.013889
| 77
| 0.606778
| 1,160
| 9,796
| 4.808621
| 0.100862
| 0.105414
| 0.102546
| 0.064719
| 0.82431
| 0.808175
| 0.808175
| 0.802797
| 0.759591
| 0.73772
| 0
| 0.003888
| 0.264904
| 9,796
| 287
| 78
| 34.132404
| 0.770726
| 0.126889
| 0
| 0.748663
| 0
| 0
| 0.149284
| 0.005285
| 0
| 0
| 0
| 0.003484
| 0
| 1
| 0.032086
| false
| 0
| 0.042781
| 0.005348
| 0.15508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57cb4bc7f518209322a0d824c1d2228e22c3f993
| 4,652
|
py
|
Python
|
py3canvas/tests/feature_flags.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
py3canvas/tests/feature_flags.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
py3canvas/tests/feature_flags.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
"""FeatureFlags API Tests for Version 1.0.
This is a testing template for the generated FeatureFlagsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.feature_flags import FeatureFlagsAPI
from py3canvas.apis.feature_flags import Feature
from py3canvas.apis.feature_flags import Featureflag
class TestFeatureFlagsAPI(unittest.TestCase):
    """Integration tests for the generated FeatureFlagsAPI client."""
    def setUp(self):
        """Build a client for the Canvas instance configured in `secrets`."""
        self.client = FeatureFlagsAPI(secrets.instance_address, secrets.access_token)
    def test_list_features_courses(self):
        """Exercise FeatureFlagsAPI.list_features_courses."""
        self.client.list_features_courses(None)  # course_id -- change me!!
    def test_list_features_accounts(self):
        """Exercise FeatureFlagsAPI.list_features_accounts."""
        self.client.list_features_accounts(None)  # account_id -- change me!!
    def test_list_features_users(self):
        """Exercise FeatureFlagsAPI.list_features_users."""
        self.client.list_features_users(None)  # user_id -- change me!!
    def test_list_enabled_features_courses(self):
        """Exercise FeatureFlagsAPI.list_enabled_features_courses."""
        self.client.list_enabled_features_courses(None)  # course_id -- change me!!
    def test_list_enabled_features_accounts(self):
        """Exercise FeatureFlagsAPI.list_enabled_features_accounts."""
        self.client.list_enabled_features_accounts(None)  # account_id -- change me!!
    def test_list_enabled_features_users(self):
        """Exercise FeatureFlagsAPI.list_enabled_features_users."""
        self.client.list_enabled_features_users(None)  # user_id -- change me!!
    def test_list_environment_features(self):
        """Exercise FeatureFlagsAPI.list_environment_features."""
        self.client.list_environment_features()
    def test_get_feature_flag_courses(self):
        """Exercise FeatureFlagsAPI.get_feature_flag_courses."""
        self.client.get_feature_flag_courses(None, None)  # course_id, feature -- change me!!
    def test_get_feature_flag_accounts(self):
        """Exercise FeatureFlagsAPI.get_feature_flag_accounts."""
        self.client.get_feature_flag_accounts(None, None)  # account_id, feature -- change me!!
    def test_get_feature_flag_users(self):
        """Exercise FeatureFlagsAPI.get_feature_flag_users."""
        self.client.get_feature_flag_users(None, None)  # feature, user_id -- change me!!
    def test_set_feature_flag_courses(self):
        """Exercise FeatureFlagsAPI.set_feature_flag_courses."""
        # PUT would mutate the Canvas instance, so this stays a no-op
        # until a safe fixture exists.
        pass
    def test_set_feature_flag_accounts(self):
        """Exercise FeatureFlagsAPI.set_feature_flag_accounts."""
        # PUT would mutate the Canvas instance, so this stays a no-op
        # until a safe fixture exists.
        pass
    def test_set_feature_flag_users(self):
        """Exercise FeatureFlagsAPI.set_feature_flag_users."""
        # PUT would mutate the Canvas instance, so this stays a no-op
        # until a safe fixture exists.
        pass
    def test_remove_feature_flag_courses(self):
        """Exercise FeatureFlagsAPI.remove_feature_flag_courses."""
        self.client.remove_feature_flag_courses(None, None)  # course_id, feature -- change me!!
    def test_remove_feature_flag_accounts(self):
        """Exercise FeatureFlagsAPI.remove_feature_flag_accounts."""
        self.client.remove_feature_flag_accounts(None, None)  # account_id, feature -- change me!!
    def test_remove_feature_flag_users(self):
        """Exercise FeatureFlagsAPI.remove_feature_flag_users."""
        self.client.remove_feature_flag_users(None, None)  # feature, user_id -- change me!!
| 40.103448
| 125
| 0.719046
| 586
| 4,652
| 5.421502
| 0.119454
| 0.083097
| 0.067989
| 0.110796
| 0.869374
| 0.86119
| 0.810198
| 0.742839
| 0.705068
| 0.401952
| 0
| 0.001348
| 0.202709
| 4,652
| 115
| 126
| 40.452174
| 0.855217
| 0.408212
| 0
| 0.355932
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.288136
| false
| 0.050847
| 0.101695
| 0
| 0.40678
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
57d2c8edf9d53e801dde17ae4136457d4b928555
| 10,969
|
py
|
Python
|
MeshTest/UnitTest/System/Entity/test_concrete.py
|
ys-warble/Mesh
|
115e7391d19ea09db3c627d8b8ed90b3e3bef9b5
|
[
"MIT"
] | null | null | null |
MeshTest/UnitTest/System/Entity/test_concrete.py
|
ys-warble/Mesh
|
115e7391d19ea09db3c627d8b8ed90b3e3bef9b5
|
[
"MIT"
] | 2
|
2019-02-25T00:10:15.000Z
|
2019-03-22T20:13:32.000Z
|
MeshTest/UnitTest/System/Entity/test_concrete.py
|
ys-warble/Mesh
|
115e7391d19ea09db3c627d8b8ed90b3e3bef9b5
|
[
"MIT"
] | null | null | null |
import numpy as np
from Mesh.System.Entity.Concrete import transform_shape
from MeshTest.AppTestCase import AppTestCase
class TestConcrete(AppTestCase):
    """Unit tests for Mesh.System.Entity.Concrete.transform_shape."""
    def setUp(self):
        super().setUp()
        # A 2x2x2 cube holding the values 0..7: every rotation of it is
        # distinct, so each (current, target) pair has a unique expected grid.
        self.arr = np.arange(8).reshape((2, 2, 2))
    def _assert_transform(self, current, target, expected):
        """Assert transform_shape(self.arr, current, target) == expected."""
        actual = transform_shape(self.arr, current, target)
        self.assertTrue(np.array_equal(expected, actual))
    def test_transform_shape_valid(self):
        # Current orientation (1, 0, 0).
        self._assert_transform((1, 0, 0), (1, 0, 0), self.arr)
        self._assert_transform((1, 0, 0), (0, 1, 0),
                               np.array([[[2, 3], [6, 7]], [[0, 1], [4, 5]]]))
        self._assert_transform((1, 0, 0), (0, 0, 1),
                               np.array([[[1, 5], [3, 7]], [[0, 4], [2, 6]]]))
        self._assert_transform((1, 0, 0), (-1, 0, 0),
                               np.array([[[6, 7], [4, 5]], [[2, 3], [0, 1]]]))
        self._assert_transform((1, 0, 0), (0, -1, 0),
                               np.array([[[4, 5], [0, 1]], [[6, 7], [2, 3]]]))
        self._assert_transform((1, 0, 0), (0, 0, -1),
                               np.array([[[4, 0], [6, 2]], [[5, 1], [7, 3]]]))
        # Current orientation (0, 1, 0).
        self._assert_transform((0, 1, 0), (1, 0, 0),
                               np.array([[[4, 5], [0, 1]], [[6, 7], [2, 3]]]))
        self._assert_transform((0, 1, 0), (0, 1, 0), self.arr)
        self._assert_transform((0, 1, 0), (0, 0, 1),
                               np.array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]))
        self._assert_transform((0, 1, 0), (-1, 0, 0),
                               np.array([[[2, 3], [6, 7]], [[0, 1], [4, 5]]]))
        self._assert_transform((0, 1, 0), (0, -1, 0),
                               np.array([[[6, 7], [4, 5]], [[2, 3], [0, 1]]]))
        self._assert_transform((0, 1, 0), (0, 0, -1),
                               np.array([[[2, 0], [3, 1]], [[6, 4], [7, 5]]]))
        # Current orientation (0, 0, 1).
        self._assert_transform((0, 0, 1), (1, 0, 0),
                               np.array([[[4, 0], [6, 2]], [[5, 1], [7, 3]]]))
        self._assert_transform((0, 0, 1), (0, 1, 0),
                               np.array([[[2, 0], [3, 1]], [[6, 4], [7, 5]]]))
        self._assert_transform((0, 0, 1), (0, 0, 1), self.arr)
        self._assert_transform((0, 0, 1), (-1, 0, 0),
                               np.array([[[1, 5], [3, 7]], [[0, 4], [2, 6]]]))
        self._assert_transform((0, 0, 1), (0, -1, 0),
                               np.array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]))
        self._assert_transform((0, 0, 1), (0, 0, -1),
                               np.array([[[5, 4], [7, 6]], [[1, 0], [3, 2]]]))
        # Current orientation (-1, 0, 0).
        self._assert_transform((-1, 0, 0), (1, 0, 0),
                               np.array([[[6, 7], [4, 5]], [[2, 3], [0, 1]]]))
        self._assert_transform((-1, 0, 0), (0, 1, 0),
                               np.array([[[4, 5], [0, 1]], [[6, 7], [2, 3]]]))
        self._assert_transform((-1, 0, 0), (0, 0, 1),
                               np.array([[[4, 0], [6, 2]], [[5, 1], [7, 3]]]))
        self._assert_transform((-1, 0, 0), (-1, 0, 0), self.arr)
        self._assert_transform((-1, 0, 0), (0, -1, 0),
                               np.array([[[2, 3], [6, 7]], [[0, 1], [4, 5]]]))
        self._assert_transform((-1, 0, 0), (0, 0, -1),
                               np.array([[[1, 5], [3, 7]], [[0, 4], [2, 6]]]))
        # Current orientation (0, -1, 0).
        self._assert_transform((0, -1, 0), (1, 0, 0),
                               np.array([[[2, 3], [6, 7]], [[0, 1], [4, 5]]]))
        self._assert_transform((0, -1, 0), (0, 1, 0),
                               np.array([[[6, 7], [4, 5]], [[2, 3], [0, 1]]]))
        self._assert_transform((0, -1, 0), (0, 0, 1),
                               np.array([[[2, 0], [3, 1]], [[6, 4], [7, 5]]]))
        self._assert_transform((0, -1, 0), (-1, 0, 0),
                               np.array([[[4, 5], [0, 1]], [[6, 7], [2, 3]]]))
        self._assert_transform((0, -1, 0), (0, -1, 0), self.arr)
        self._assert_transform((0, -1, 0), (0, 0, -1),
                               np.array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]))
        # Current orientation (0, 0, -1).
        self._assert_transform((0, 0, -1), (1, 0, 0),
                               np.array([[[1, 5], [3, 7]], [[0, 4], [2, 6]]]))
        self._assert_transform((0, 0, -1), (0, 1, 0),
                               np.array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]))
        self._assert_transform((0, 0, -1), (0, 0, 1),
                               np.array([[[5, 4], [7, 6]], [[1, 0], [3, 2]]]))
        self._assert_transform((0, 0, -1), (-1, 0, 0),
                               np.array([[[4, 0], [6, 2]], [[5, 1], [7, 3]]]))
        self._assert_transform((0, 0, -1), (0, -1, 0),
                               np.array([[[2, 0], [3, 1]], [[6, 4], [7, 5]]]))
        self._assert_transform((0, 0, -1), (0, 0, -1), self.arr)
    def test_transform_shape_invalid_type(self):
        # Non-array shapes and non-tuple orientations must raise TypeError.
        for bad in [
            (None, None, None),
            (1, 1, 1),
            ('string', 'string', 'string'),
            (self.arr, None, None),
            (self.arr, (0, 0, 0), None),
            (self.arr, None, (0, 0, 0)),
            (None, (1, 0, 0), (0, 1, 0)),
            (1, (1, 0, 0), (0, 1, 0)),
            ('string', (1, 0, 0), (0, 1, 0)),
        ]:
            self.assertRaises(TypeError, lambda args=bad: transform_shape(*args))
    def test_transform_shape_invalid_unimplemented(self):
        # Orientations that are not unit axis vectors are unsupported.
        for current, target in [
            ((0, 0, 0), (0, 0, 0)),
            ((0, 2, 0), (1, 0, 0)),
            ((0, 1, 0), (0, 0, 2)),
            ((0, 2, 0), (1, 0, 0)),
            ((0, 0.5, 0.5), (1, 0, 0)),
            ((0, 1, 0), (0.3, 0.4, 0)),
        ]:
            self.assertRaises(NotImplementedError,
                              lambda c=current, t=target: transform_shape(self.arr, c, t))
    def test_transform_shape_invalid_index(self):
        # Orientation tuples must have exactly three components.
        for current, target in [
            ((0, 0), (0, 0)),
            ((0, 1), (0, 1)),
            ((0, 1, 0), (0, 1)),
            ((0, 1), (0, 0, 1)),
            ((0, 1, 0), (0, 0, 0, 1)),
            ((0, 1, 0, 0), (0, 0, 1)),
            ((0, 1, 0, 0), (0, 0, 1, 0)),
        ]:
            self.assertRaises(IndexError,
                              lambda c=current, t=target: transform_shape(self.arr, c, t))
| 35.157051
| 107
| 0.472058
| 1,341
| 10,969
| 3.779269
| 0.03654
| 0.039463
| 0.031965
| 0.21547
| 0.948106
| 0.941989
| 0.929558
| 0.898777
| 0.887135
| 0.841555
| 0
| 0.077163
| 0.328927
| 10,969
| 311
| 108
| 35.270096
| 0.61133
| 0
| 0
| 0.743295
| 0
| 0
| 0.002235
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.019157
| false
| 0
| 0.011494
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
57dc92a308accf76950d4c8af2a97681578eaaa0
| 15,308
|
py
|
Python
|
sdk/python/pulumi_aws/ecs/cluster_capacity_providers.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ecs/cluster_capacity_providers.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ecs/cluster_capacity_providers.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ClusterCapacityProvidersArgs', 'ClusterCapacityProviders']
@pulumi.input_type
class ClusterCapacityProvidersArgs:
    # NOTE(review): this class is emitted by the Pulumi Terraform Bridge
    # (tfgen) -- keep behavioral changes in the generator, not here.
    def __init__(__self__, *,
                 cluster_name: pulumi.Input[str],
                 capacity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 default_capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]] = None):
        """
        The set of arguments for constructing a ClusterCapacityProviders resource.
        :param pulumi.Input[str] cluster_name: Name of the ECS cluster to manage capacity providers for.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] capacity_providers: Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`.
        :param pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]] default_capacity_provider_strategies: Set of capacity provider strategies to use by default for the cluster. Detailed below.
        """
        # cluster_name is required; the optional inputs are only recorded
        # when supplied so unset values stay absent.
        pulumi.set(__self__, "cluster_name", cluster_name)
        if capacity_providers is not None:
            pulumi.set(__self__, "capacity_providers", capacity_providers)
        if default_capacity_provider_strategies is not None:
            pulumi.set(__self__, "default_capacity_provider_strategies", default_capacity_provider_strategies)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> pulumi.Input[str]:
        """
        Name of the ECS cluster to manage capacity providers for.
        """
        return pulumi.get(self, "cluster_name")
    @cluster_name.setter
    def cluster_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_name", value)
    @property
    @pulumi.getter(name="capacityProviders")
    def capacity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`.
        """
        return pulumi.get(self, "capacity_providers")
    @capacity_providers.setter
    def capacity_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "capacity_providers", value)
    @property
    @pulumi.getter(name="defaultCapacityProviderStrategies")
    def default_capacity_provider_strategies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]:
        """
        Set of capacity provider strategies to use by default for the cluster. Detailed below.
        """
        return pulumi.get(self, "default_capacity_provider_strategies")
    @default_capacity_provider_strategies.setter
    def default_capacity_provider_strategies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]):
        pulumi.set(self, "default_capacity_provider_strategies", value)
@pulumi.input_type
class _ClusterCapacityProvidersState:
    # NOTE(review): generated by the Pulumi Terraform Bridge (tfgen) -- keep
    # behavioral changes in the generator, not here.
    def __init__(__self__, *,
                 capacity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 cluster_name: Optional[pulumi.Input[str]] = None,
                 default_capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]] = None):
        """
        Input properties used for looking up and filtering ClusterCapacityProviders resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] capacity_providers: Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`.
        :param pulumi.Input[str] cluster_name: Name of the ECS cluster to manage capacity providers for.
        :param pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]] default_capacity_provider_strategies: Set of capacity provider strategies to use by default for the cluster. Detailed below.
        """
        # Unlike the Args class, every state input is optional: only the
        # values actually supplied are recorded.
        if capacity_providers is not None:
            pulumi.set(__self__, "capacity_providers", capacity_providers)
        if cluster_name is not None:
            pulumi.set(__self__, "cluster_name", cluster_name)
        if default_capacity_provider_strategies is not None:
            pulumi.set(__self__, "default_capacity_provider_strategies", default_capacity_provider_strategies)
    @property
    @pulumi.getter(name="capacityProviders")
    def capacity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`.
        """
        return pulumi.get(self, "capacity_providers")
    @capacity_providers.setter
    def capacity_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "capacity_providers", value)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the ECS cluster to manage capacity providers for.
        """
        return pulumi.get(self, "cluster_name")
    @cluster_name.setter
    def cluster_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_name", value)
    @property
    @pulumi.getter(name="defaultCapacityProviderStrategies")
    def default_capacity_provider_strategies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]:
        """
        Set of capacity provider strategies to use by default for the cluster. Detailed below.
        """
        return pulumi.get(self, "default_capacity_provider_strategies")
    @default_capacity_provider_strategies.setter
    def default_capacity_provider_strategies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]):
        pulumi.set(self, "default_capacity_provider_strategies", value)
class ClusterCapacityProviders(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
capacity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
default_capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]] = None,
__props__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_cluster = aws.ecs.Cluster("exampleCluster")
example_cluster_capacity_providers = aws.ecs.ClusterCapacityProviders("exampleClusterCapacityProviders",
cluster_name=example_cluster.name,
capacity_providers=["FARGATE"],
default_capacity_provider_strategies=[aws.ecs.ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs(
base=1,
weight=100,
capacity_provider="FARGATE",
)])
```
## Import
ECS cluster capacity providers can be imported using the `cluster_name` attribute. For example
```sh
$ pulumi import aws:ecs/clusterCapacityProviders:ClusterCapacityProviders example my-cluster
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] capacity_providers: Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`.
:param pulumi.Input[str] cluster_name: Name of the ECS cluster to manage capacity providers for.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]] default_capacity_provider_strategies: Set of capacity provider strategies to use by default for the cluster. Detailed below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClusterCapacityProvidersArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_cluster = aws.ecs.Cluster("exampleCluster")
example_cluster_capacity_providers = aws.ecs.ClusterCapacityProviders("exampleClusterCapacityProviders",
cluster_name=example_cluster.name,
capacity_providers=["FARGATE"],
default_capacity_provider_strategies=[aws.ecs.ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs(
base=1,
weight=100,
capacity_provider="FARGATE",
)])
```
## Import
ECS cluster capacity providers can be imported using the `cluster_name` attribute. For example
```sh
$ pulumi import aws:ecs/clusterCapacityProviders:ClusterCapacityProviders example my-cluster
```
:param str resource_name: The name of the resource.
:param ClusterCapacityProvidersArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClusterCapacityProvidersArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   capacity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   cluster_name: Optional[pulumi.Input[str]] = None,
                   default_capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]] = None,
                   __props__=None):
    """Shared constructor body: validate options and register the resource."""
    opts = opts if opts is not None else pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource: build the property bag from the inputs.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ClusterCapacityProvidersArgs.__new__(ClusterCapacityProvidersArgs)
        props = __props__.__dict__
        props["capacity_providers"] = capacity_providers
        if cluster_name is None and not opts.urn:
            raise TypeError("Missing required property 'cluster_name'")
        props["cluster_name"] = cluster_name
        props["default_capacity_provider_strategies"] = default_capacity_provider_strategies
    super(ClusterCapacityProviders, __self__).__init__(
        'aws:ecs/clusterCapacityProviders:ClusterCapacityProviders',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        capacity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        cluster_name: Optional[pulumi.Input[str]] = None,
        default_capacity_provider_strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]]] = None) -> 'ClusterCapacityProviders':
    """
    Look up an existing ClusterCapacityProviders resource by name and provider id,
    optionally seeding known state values.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] capacity_providers: Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`.
    :param pulumi.Input[str] cluster_name: Name of the ECS cluster to manage capacity providers for.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterCapacityProvidersDefaultCapacityProviderStrategyArgs']]]] default_capacity_provider_strategies: Set of capacity provider strategies to use by default for the cluster. Detailed below.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    state = _ClusterCapacityProvidersState.__new__(_ClusterCapacityProvidersState)
    for key, value in (("capacity_providers", capacity_providers),
                       ("cluster_name", cluster_name),
                       ("default_capacity_provider_strategies", default_capacity_provider_strategies)):
        state.__dict__[key] = value
    return ClusterCapacityProviders(resource_name, opts=opts, __props__=state)
@property
@pulumi.getter(name="capacityProviders")
def capacity_providers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    Names of the capacity providers associated with the cluster. Valid values
    also include `FARGATE` and `FARGATE_SPOT`.
    """
    # Output values are stored under the snake_case property name.
    return pulumi.get(self, "capacity_providers")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Output[str]:
    """
    Name of the ECS cluster whose capacity providers are managed here.
    """
    # Output values are stored under the snake_case property name.
    return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="defaultCapacityProviderStrategies")
def default_capacity_provider_strategies(self) -> pulumi.Output[Optional[Sequence['outputs.ClusterCapacityProvidersDefaultCapacityProviderStrategy']]]:
    """
    Capacity provider strategies used by default for the cluster.
    """
    # Output values are stored under the snake_case property name.
    return pulumi.get(self, "default_capacity_provider_strategies")
| 52.604811
| 257
| 0.707539
| 1,592
| 15,308
| 6.547739
| 0.104271
| 0.072813
| 0.09977
| 0.10447
| 0.808039
| 0.786454
| 0.780794
| 0.773216
| 0.75566
| 0.746642
| 0
| 0.000741
| 0.206493
| 15,308
| 290
| 258
| 52.786207
| 0.857413
| 0.3555
| 0
| 0.639456
| 1
| 0
| 0.187252
| 0.126596
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14966
| false
| 0.006803
| 0.047619
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17b1fb950239cf84895b01209096ae3457d28065
| 14,607
|
py
|
Python
|
example/controller/tests/view/module/pagination/__init__.py
|
donghak-shin/dp-tornado
|
095bb293661af35cce5f917d8a2228d273489496
|
[
"MIT"
] | 18
|
2015-04-07T14:28:39.000Z
|
2020-02-08T14:03:38.000Z
|
example/controller/tests/view/module/pagination/__init__.py
|
donghak-shin/dp-tornado
|
095bb293661af35cce5f917d8a2228d273489496
|
[
"MIT"
] | 7
|
2016-10-05T05:14:06.000Z
|
2021-05-20T02:07:22.000Z
|
example/controller/tests/view/module/pagination/__init__.py
|
donghak-shin/dp-tornado
|
095bb293661af35cce5f917d8a2228d273489496
|
[
"MIT"
] | 11
|
2015-12-15T09:49:39.000Z
|
2021-09-06T18:38:21.000Z
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
from bs4 import BeautifulSoup
class PaginationController(Controller):
    """Integration tests for the pagination view module.

    Each test renders ``tests/view/module/pagination.html`` with a set of
    pagination parameters and asserts on the generated markup. The styled
    tests all share one fixed set of styling kwargs (built by
    ``_styled_params``); only the current page and the expected edge links
    differ between them.
    """

    def get(self):
        # Run every check; any failed assert fails the request.
        self.test_simple_1()
        self.test_simple_2()
        self.test_all()
        self.test_first_page()
        self.test_last_page()
        self.test_first_block()
        self.test_last_block()
        self.test_render()

    def test_render(self):
        """Smoke test: render the template into the response with defaults."""
        params = {
            'total_count': 100,
            'page': 3,
            'rpp': 10,
            'kwargs': {}
        }
        self.render('tests/view/module/pagination.html', params)

    def _check_simple(self, total_count, page, link_count):
        """Render with no styling kwargs and assert the default markup:
        one region <div>, one <strong> current marker, `link_count` <a>s."""
        params = {
            'total_count': total_count,
            'page': page,
            'rpp': 10,
            'kwargs': {}
        }
        pagination = self.render_string('tests/view/module/pagination.html', params)
        pagination = BeautifulSoup(pagination, 'lxml')
        assert(len(pagination.findAll('div')) == 1)
        assert(len(pagination.find('div').findAll('strong')) == 1)
        assert(len(pagination.find('div').findAll('a')) == link_count)

    def test_simple_1(self):
        """Default styling, 10 pages total, current page 3."""
        self._check_simple(100, 3, 9)

    def test_simple_2(self):
        """Default styling, 100 pages total, current page 25."""
        self._check_simple(1000, 25, 13)

    def _styled_params(self, page):
        """Build render params (10000 items, rpp 10 → 1000 pages) carrying
        the full set of custom styling kwargs used by the styled tests."""
        return {
            'total_count': 10000,
            'page': page,
            'rpp': 10,
            'kwargs': {
                'region_tag': 'div',
                'region_class': 'region-class',
                'first': 'First',
                'first_class': 'first-class',
                'last': 'Last',
                'last_class': 'last-class',
                'prev_block': 'Prev-Block',
                'prev_block_class': 'prev-block-class',
                'next_block': 'Next-Block',
                'next_block_class': 'next-block-class',
                'prev': 'Prev',
                'prev_class': 'prev-class',
                'next': 'Next',
                'next_class': 'next-class',
                'current_tag': 'strong',
                'current_class': 'current-class',
                'link_tag': 'a',
                'link_class': 'link-class',
                'space': '_'
            }
        }

    def _styled_links(self, page, expected_count):
        """Render a styled pagination for *page*, assert the region markup
        and total link count, and return the list of <a> elements."""
        markup = self.render_string('tests/view/module/pagination.html',
                                    self._styled_params(page))
        soup = BeautifulSoup(markup, 'lxml')
        assert(len(soup.findAll('div')) == 1)
        assert(soup.find('div').attrs['class'][0] == 'region-class')
        links = soup.find('div').findAll('a')
        assert(len(links) == expected_count)
        return links

    def _assert_edge_link(self, link, text, css_class):
        """Assert one First/Prev-Block/Prev/Next/Next-Block/Last link."""
        assert(link.attrs['class'][0] == css_class)
        assert(link.text == text)

    def _assert_number_links(self, links):
        """Assert that every given link is a plain numbered page link."""
        for e in links:
            assert(e.name == 'a')
            assert(e.attrs['class'][0] == 'link-class')
            # the link text must be purely numeric
            assert(self.helper.numeric.extract_numbers(e.text) == e.text)

    def test_all(self):
        """Middle page (33): every navigation link is present (15 total)."""
        links = self._styled_links(33, 15)
        self._assert_edge_link(links[0], 'First', 'first-class')
        self._assert_edge_link(links[1], 'Prev-Block', 'prev-block-class')
        self._assert_edge_link(links[2], 'Prev', 'prev-class')
        self._assert_edge_link(links[-3], 'Next', 'next-class')
        self._assert_edge_link(links[-2], 'Next-Block', 'next-block-class')
        self._assert_edge_link(links[-1], 'Last', 'last-class')
        self._assert_number_links(links[3:-3])

    def test_first_page(self):
        """Page 1: First/Prev-Block/Prev are omitted (3 fewer links)."""
        links = self._styled_links(1, 15 - 3)
        self._assert_edge_link(links[-3], 'Next', 'next-class')
        self._assert_edge_link(links[-2], 'Next-Block', 'next-block-class')
        self._assert_edge_link(links[-1], 'Last', 'last-class')
        self._assert_number_links(links[0:-3])

    def test_last_page(self):
        """Page 1000 (the last): Next/Next-Block/Last are omitted."""
        links = self._styled_links(1000, 15 - 3)
        self._assert_edge_link(links[0], 'First', 'first-class')
        self._assert_edge_link(links[1], 'Prev-Block', 'prev-block-class')
        self._assert_edge_link(links[2], 'Prev', 'prev-class')
        self._assert_number_links(links[3:])

    def test_first_block(self):
        """Page 2 (first block): First and Prev-Block are omitted."""
        links = self._styled_links(2, 15 - 2)
        self._assert_edge_link(links[0], 'Prev', 'prev-class')
        self._assert_edge_link(links[-3], 'Next', 'next-class')
        self._assert_edge_link(links[-2], 'Next-Block', 'next-block-class')
        self._assert_edge_link(links[-1], 'Last', 'last-class')
        self._assert_number_links(links[1:-3])

    def test_last_block(self):
        """Page 999 (last block): Next-Block and Last are omitted."""
        links = self._styled_links(999, 15 - 2)
        self._assert_edge_link(links[0], 'First', 'first-class')
        self._assert_edge_link(links[1], 'Prev-Block', 'prev-block-class')
        self._assert_edge_link(links[2], 'Prev', 'prev-class')
        self._assert_edge_link(links[-1], 'Next', 'next-class')
        self._assert_number_links(links[3:-1])
| 27.875954
| 84
| 0.525844
| 1,580
| 14,607
| 4.626582
| 0.047468
| 0.056635
| 0.045144
| 0.049248
| 0.947606
| 0.946785
| 0.94186
| 0.934884
| 0.934884
| 0.934884
| 0
| 0.018348
| 0.339563
| 14,607
| 523
| 85
| 27.929254
| 0.739401
| 0.001438
| 0
| 0.847411
| 0
| 0
| 0.160518
| 0.018102
| 0
| 0
| 0
| 0
| 0.207084
| 1
| 0.024523
| false
| 0
| 0.00545
| 0
| 0.032698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17b3257d427965ab03a2a19c13479c51f5dbf384
| 5,912
|
py
|
Python
|
google/dataflow/v1beta3/dataflow-v1beta3-py/google/cloud/dataflow_v1beta3/types/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/dataflow/v1beta3/dataflow-v1beta3-py/google/cloud/dataflow_v1beta3/types/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/dataflow/v1beta3/dataflow-v1beta3-py/google/cloud/dataflow_v1beta3/types/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .environment import (
AutoscalingSettings,
DebugOptions,
Disk,
Environment,
Package,
SdkHarnessContainerImage,
TaskRunnerSettings,
WorkerPool,
WorkerSettings,
AutoscalingAlgorithm,
DefaultPackageSet,
FlexResourceSchedulingGoal,
JobType,
ShuffleMode,
TeardownPolicy,
WorkerIPAddressConfiguration,
)
from .jobs import (
BigQueryIODetails,
BigTableIODetails,
CheckActiveJobsRequest,
CheckActiveJobsResponse,
CreateJobRequest,
DatastoreIODetails,
DisplayData,
ExecutionStageState,
ExecutionStageSummary,
FailedLocation,
FileIODetails,
GetJobRequest,
Job,
JobExecutionInfo,
JobExecutionStageInfo,
JobMetadata,
ListJobsRequest,
ListJobsResponse,
PipelineDescription,
PubSubIODetails,
SdkVersion,
SnapshotJobRequest,
SpannerIODetails,
Step,
TransformSummary,
UpdateJobRequest,
JobState,
JobView,
KindType,
)
from .messages import (
AutoscalingEvent,
JobMessage,
ListJobMessagesRequest,
ListJobMessagesResponse,
StructuredMessage,
JobMessageImportance,
)
from .metrics import (
GetJobExecutionDetailsRequest,
GetJobMetricsRequest,
GetStageExecutionDetailsRequest,
JobExecutionDetails,
JobMetrics,
MetricStructuredName,
MetricUpdate,
ProgressTimeseries,
StageExecutionDetails,
StageSummary,
WorkerDetails,
WorkItemDetails,
ExecutionState,
)
from .snapshots import (
DeleteSnapshotRequest,
DeleteSnapshotResponse,
GetSnapshotRequest,
ListSnapshotsRequest,
ListSnapshotsResponse,
PubsubSnapshotMetadata,
Snapshot,
SnapshotState,
)
from .streaming import (
ComputationTopology,
CustomSourceLocation,
DataDiskAssignment,
KeyRangeDataDiskAssignment,
KeyRangeLocation,
MountedDataDisk,
PubsubLocation,
StateFamilyConfig,
StreamingApplianceSnapshotConfig,
StreamingComputationRanges,
StreamingSideInputLocation,
StreamingStageLocation,
StreamLocation,
TopologyConfig,
)
from .templates import (
ContainerSpec,
CreateJobFromTemplateRequest,
DynamicTemplateLaunchParams,
FlexTemplateRuntimeEnvironment,
GetTemplateRequest,
GetTemplateResponse,
InvalidTemplateParameters,
LaunchFlexTemplateParameter,
LaunchFlexTemplateRequest,
LaunchFlexTemplateResponse,
LaunchTemplateParameters,
LaunchTemplateRequest,
LaunchTemplateResponse,
ParameterMetadata,
RuntimeEnvironment,
RuntimeMetadata,
SDKInfo,
TemplateMetadata,
ParameterType,
)
# Public API of this types package, grouped by the submodule each name
# is re-exported from (see the imports above).
__all__ = (
    # from .environment
    'AutoscalingSettings',
    'DebugOptions',
    'Disk',
    'Environment',
    'Package',
    'SdkHarnessContainerImage',
    'TaskRunnerSettings',
    'WorkerPool',
    'WorkerSettings',
    'AutoscalingAlgorithm',
    'DefaultPackageSet',
    'FlexResourceSchedulingGoal',
    'JobType',
    'ShuffleMode',
    'TeardownPolicy',
    'WorkerIPAddressConfiguration',
    # from .jobs
    'BigQueryIODetails',
    'BigTableIODetails',
    'CheckActiveJobsRequest',
    'CheckActiveJobsResponse',
    'CreateJobRequest',
    'DatastoreIODetails',
    'DisplayData',
    'ExecutionStageState',
    'ExecutionStageSummary',
    'FailedLocation',
    'FileIODetails',
    'GetJobRequest',
    'Job',
    'JobExecutionInfo',
    'JobExecutionStageInfo',
    'JobMetadata',
    'ListJobsRequest',
    'ListJobsResponse',
    'PipelineDescription',
    'PubSubIODetails',
    'SdkVersion',
    'SnapshotJobRequest',
    'SpannerIODetails',
    'Step',
    'TransformSummary',
    'UpdateJobRequest',
    'JobState',
    'JobView',
    'KindType',
    # from .messages
    'AutoscalingEvent',
    'JobMessage',
    'ListJobMessagesRequest',
    'ListJobMessagesResponse',
    'StructuredMessage',
    'JobMessageImportance',
    # from .metrics
    'GetJobExecutionDetailsRequest',
    'GetJobMetricsRequest',
    'GetStageExecutionDetailsRequest',
    'JobExecutionDetails',
    'JobMetrics',
    'MetricStructuredName',
    'MetricUpdate',
    'ProgressTimeseries',
    'StageExecutionDetails',
    'StageSummary',
    'WorkerDetails',
    'WorkItemDetails',
    'ExecutionState',
    # from .snapshots
    'DeleteSnapshotRequest',
    'DeleteSnapshotResponse',
    'GetSnapshotRequest',
    'ListSnapshotsRequest',
    'ListSnapshotsResponse',
    'PubsubSnapshotMetadata',
    'Snapshot',
    'SnapshotState',
    # from .streaming
    'ComputationTopology',
    'CustomSourceLocation',
    'DataDiskAssignment',
    'KeyRangeDataDiskAssignment',
    'KeyRangeLocation',
    'MountedDataDisk',
    'PubsubLocation',
    'StateFamilyConfig',
    'StreamingApplianceSnapshotConfig',
    'StreamingComputationRanges',
    'StreamingSideInputLocation',
    'StreamingStageLocation',
    'StreamLocation',
    'TopologyConfig',
    # from .templates
    'ContainerSpec',
    'CreateJobFromTemplateRequest',
    'DynamicTemplateLaunchParams',
    'FlexTemplateRuntimeEnvironment',
    'GetTemplateRequest',
    'GetTemplateResponse',
    'InvalidTemplateParameters',
    'LaunchFlexTemplateParameter',
    'LaunchFlexTemplateRequest',
    'LaunchFlexTemplateResponse',
    'LaunchTemplateParameters',
    'LaunchTemplateRequest',
    'LaunchTemplateResponse',
    'ParameterMetadata',
    'RuntimeEnvironment',
    'RuntimeMetadata',
    'SDKInfo',
    'TemplateMetadata',
    'ParameterType',
)
| 24.329218
| 74
| 0.714817
| 325
| 5,912
| 12.990769
| 0.553846
| 0.014211
| 0.006158
| 0.007579
| 0.863098
| 0.863098
| 0.811937
| 0.811937
| 0.74325
| 0.74325
| 0
| 0.001908
| 0.202131
| 5,912
| 242
| 75
| 24.429752
| 0.893152
| 0.096245
| 0
| 0
| 0
| 0
| 0.342031
| 0.14342
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039823
| 0
| 0.039823
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17e22c53d55ab769f6ed8874f77cea25cc3fb536
| 135
|
py
|
Python
|
code/chapter-1/exercise1_2.py
|
Kevin-Oudai/python-solutions
|
d67f6b14723b000fec0011c3e8156b805eb288f7
|
[
"MIT"
] | null | null | null |
code/chapter-1/exercise1_2.py
|
Kevin-Oudai/python-solutions
|
d67f6b14723b000fec0011c3e8156b805eb288f7
|
[
"MIT"
] | null | null | null |
code/chapter-1/exercise1_2.py
|
Kevin-Oudai/python-solutions
|
d67f6b14723b000fec0011c3e8156b805eb288f7
|
[
"MIT"
] | null | null | null |
# Chapter 1, exercise 1.2: print the greeting five times.
for _ in range(5):
    print("Welcome to Python")
| 22.5
| 26
| 0.740741
| 20
| 135
| 5
| 0.2
| 0.6
| 0.7
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.111111
| 135
| 5
| 27
| 27
| 0.833333
| 0
| 0
| 1
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 15
|
17e43d8cd899f3aaac008d2930e7f57c31b0c140
| 683
|
py
|
Python
|
python/se3-legacy/tests/test_ie.py
|
saucelabs-training/platform-config-tests
|
11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c
|
[
"MIT"
] | 1
|
2021-11-17T22:29:42.000Z
|
2021-11-17T22:29:42.000Z
|
python/se3-legacy/tests/test_ie.py
|
saucelabs-training/platform-config-tests
|
11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c
|
[
"MIT"
] | null | null | null |
python/se3-legacy/tests/test_ie.py
|
saucelabs-training/platform-config-tests
|
11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c
|
[
"MIT"
] | 1
|
2021-11-17T22:29:35.000Z
|
2021-11-17T22:29:35.000Z
|
def test_win10(helpers):
caps = {}
caps['browserName'] = 'internet explorer'
caps['platform'] = 'Windows 10'
caps['version'] = '11'
driver = helpers.start_driver(caps)
helpers.validate_google(driver)
def test_late_win7(helpers):
caps = {}
caps['browserName'] = 'internet explorer'
caps['platform'] = 'Windows 7'
caps['version'] = '11'
driver = helpers.start_driver(caps)
helpers.validate_google(driver)
def test_early_win7(helpers):
caps = {}
caps['browserName'] = 'internet explorer'
caps['platform'] = 'Windows 7'
caps['version'] = '9'
driver = helpers.start_driver(caps)
helpers.validate_google(driver)
| 25.296296
| 45
| 0.650073
| 77
| 683
| 5.623377
| 0.285714
| 0.048499
| 0.103926
| 0.180139
| 0.944573
| 0.944573
| 0.944573
| 0.944573
| 0.944573
| 0.676674
| 0
| 0.02381
| 0.200586
| 683
| 26
| 46
| 26.269231
| 0.769231
| 0
| 0
| 0.761905
| 0
| 0
| 0.237189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
aa397bf2e760183861998320af68ee05b732d3c8
| 2,774
|
py
|
Python
|
tests/integration/taskqueue/manager_test.py
|
gunjanswitchco/gcloud-rest
|
bf6af906880c25500d8e76b5ce3f807968185780
|
[
"MIT"
] | null | null | null |
tests/integration/taskqueue/manager_test.py
|
gunjanswitchco/gcloud-rest
|
bf6af906880c25500d8e76b5ce3f807968185780
|
[
"MIT"
] | null | null | null |
tests/integration/taskqueue/manager_test.py
|
gunjanswitchco/gcloud-rest
|
bf6af906880c25500d8e76b5ce3f807968185780
|
[
"MIT"
] | null | null | null |
import json
import time
import pytest
from gcloud.rest.taskqueue import encode
from gcloud.rest.taskqueue import TaskManager
@pytest.mark.xfail
def test_lifecycle(caplog, mocker, project, creds, pull_queue_name):
    """Happy path: inserted tasks are leased and handed to the worker in
    one batch, without any ERROR-level log records."""
    tasks = [{'test_idx': i} for i in range(1, 5)]
    worker = mocker.Mock()
    worker.return_value = ['ok'] * len(tasks)
    tm = TaskManager(project, pull_queue_name, worker, batch_size=len(tasks),
                     lease_seconds=10, service_file=creds)
    tm.tq.drain()  # drop stale tasks left over from earlier runs
    # enqueue fresh work items
    for task in tasks:
        tm.tq.insert(encode(json.dumps(task)),
                     tag=encode('gcloud-rest-manager-test-lifecycle'))
    tm.find_and_process_work()
    assert worker.mock_calls == [mocker.call(tasks)]
    assert all(r.levelname != 'ERROR' for r in caplog.records)
@pytest.mark.slow
@pytest.mark.xfail
def test_multiple_leases(caplog, mocker, project, creds, pull_queue_name):
    """A worker that sleeps past lease_seconds must still complete its
    tasks, exercising lease renewal."""
    tasks = [{'test_idx': i} for i in (1, 2)]

    def succeed_after_multiple_leases(ts):
        # Sleep longer than lease_seconds so the lease has to be renewed.
        time.sleep(10)
        return ['ok' for _ in ts]

    worker = mocker.Mock()
    worker.side_effect = succeed_after_multiple_leases
    tm = TaskManager(project, pull_queue_name, worker, batch_size=len(tasks),
                     lease_seconds=4, service_file=creds)
    tm.tq.drain()  # drop stale tasks left over from earlier runs
    # enqueue fresh work items
    for task in tasks:
        tm.tq.insert(encode(json.dumps(task)),
                     tag=encode('gcloud-rest-manager-test-multilease'))
    caplog.clear()
    tm.find_and_process_work()
    assert worker.mock_calls == [mocker.call(tasks)]
    assert all(r.levelname != 'ERROR' for r in caplog.records)
@pytest.mark.slow
@pytest.mark.xfail
def test_multiple_leases_churn(caplog, mocker, project, creds,
                               pull_queue_name):
    """Like test_multiple_leases, but the worker burns CPU instead of
    sleeping, so renewal must happen despite a busy process."""
    tasks = [{'test_idx': i} for i in (1, 2)]

    def succeed_after_multiple_leases(ts):
        # CPU-bound busy work intended to outlast lease_seconds.
        _ = [x**2 for x in range(40000000)]
        return ['ok' for _ in ts]

    worker = mocker.Mock()
    worker.side_effect = succeed_after_multiple_leases
    tm = TaskManager(project, pull_queue_name, worker, batch_size=len(tasks),
                     lease_seconds=4, service_file=creds)
    tm.tq.drain()  # drop stale tasks left over from earlier runs
    # enqueue fresh work items
    for task in tasks:
        tm.tq.insert(encode(json.dumps(task)),
                     tag=encode('gcloud-rest-manager-test-multilease'))
    caplog.clear()
    tm.find_and_process_work()
    assert worker.mock_calls == [mocker.call(tasks)]
    assert all(r.levelname != 'ERROR' for r in caplog.records)
| 26.932039
| 77
| 0.629056
| 357
| 2,774
| 4.703081
| 0.22409
| 0.033353
| 0.046456
| 0.061942
| 0.900536
| 0.852889
| 0.852889
| 0.852889
| 0.852889
| 0.852889
| 0
| 0.011138
| 0.255588
| 2,774
| 102
| 78
| 27.196078
| 0.801937
| 0.039654
| 0
| 0.736111
| 0
| 0
| 0.071133
| 0.039142
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.069444
| false
| 0
| 0.069444
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a4cad147a4bfe6a35c10df7da0d14d3b47220d17
| 15,075
|
py
|
Python
|
python/test/test_BoolFunction.py
|
zeta1999/tweedledum
|
f070bf582347668f96943a459e51e1a39572b7f4
|
[
"MIT"
] | 1
|
2022-03-04T21:44:26.000Z
|
2022-03-04T21:44:26.000Z
|
python/test/test_BoolFunction.py
|
CQCL/tweedledum
|
f070bf582347668f96943a459e51e1a39572b7f4
|
[
"MIT"
] | null | null | null |
python/test/test_BoolFunction.py
|
CQCL/tweedledum
|
f070bf582347668f96943a459e51e1a39572b7f4
|
[
"MIT"
] | 1
|
2021-04-12T06:17:06.000Z
|
2021-04-12T06:17:06.000Z
|
#-------------------------------------------------------------------------------
# Part of Tweedledum Project. This file is distributed under the MIT License.
# See accompanying file /LICENSE for details.
#-------------------------------------------------------------------------------
import unittest
from tweedledum.BoolFunctionCompiler import BitVec, BoolFunction
from . import examples
class TestBoolFunction(unittest.TestCase):
def test_constant_3bit(self):
function = BoolFunction(examples.constant_3bit)
self.assertEqual(function.signature_, [])
result = examples.constant_3bit()
self.assertEqual(result, BitVec(3, '101'))
def test_id(self):
function = BoolFunction(examples.identity)
self.assertEqual(function.signature_, [[type(BitVec(1)), 1]])
result = examples.identity(BitVec(1, '0'))
self.assertEqual(result, BitVec(1, '0'))
result = examples.identity(BitVec(1, '1'))
self.assertEqual(result, BitVec(1, '1'))
def test_id_2bit(self):
function = BoolFunction(examples.identity_2bit)
self.assertEqual(function.signature_, [[type(BitVec(2)), 2]])
for a in range(4):
tmp = BitVec(2, a)
result = examples.identity(tmp)
self.assertEqual(result, tmp)
def test_bool_not(self):
function = BoolFunction(examples.bool_not)
self.assertEqual(function.signature_, [[type(BitVec(1)), 1]])
for a in range(2):
tmp = BitVec(1, a)
result = examples.bool_not(tmp)
self.assertEqual(result, not bool(tmp))
def test_bit_not(self):
function = BoolFunction(examples.bit_not)
self.assertEqual(function.signature_, [[type(BitVec(1)), 1]])
for a in range(2):
tmp = BitVec(1, a)
result = examples.bit_not(tmp)
self.assertEqual(result, ~tmp)
def test_bit_not_2bit(self):
function = BoolFunction(examples.bit_not_2bit)
self.assertEqual(function.signature_, [[type(BitVec(2)), 2]])
for a in range(4):
tmp = BitVec(2, a)
result = examples.bit_not_2bit(tmp)
self.assertEqual(result, ~tmp)
def test_bool_and(self):
function = BoolFunction(examples.bool_and)
self.assertEqual(function.signature_, [[type(BitVec(1)), 1],
[type(BitVec(1)), 1]])
for a in range(2):
for b in range(2):
result = examples.bool_and(BitVec(1, a), BitVec(1, b))
tmp = BitVec(1, a) and BitVec(1, b)
self.assertEqual(result, tmp)
def test_bit_and(self):
function = BoolFunction(examples.bit_and)
self.assertEqual(function.signature_, [[type(BitVec(1)), 1],
[type(BitVec(1)), 1]])
for a in range(2):
for b in range(2):
result = examples.bit_and(BitVec(1, a), BitVec(1, b))
tmp = BitVec(1, a) & BitVec(1, b)
self.assertEqual(result, tmp)
def test_bit_and_2bit(self):
    """bit_and_2bit: two 2-bit arguments; matches BitVec's & operator."""
    func = BoolFunction(examples.bit_and_2bit)
    self.assertEqual(func.signature_,
                     [[type(BitVec(2)), 2], [type(BitVec(2)), 2]])
    for lhs in range(4):
        for rhs in range(4):
            expected = BitVec(2, lhs) & BitVec(2, rhs)
            self.assertEqual(
                examples.bit_and_2bit(BitVec(2, lhs), BitVec(2, rhs)),
                expected)
def test_bool_or(self):
    """bool_or: two 1-bit arguments; matches Python's `or` on BitVecs."""
    func = BoolFunction(examples.bool_or)
    self.assertEqual(func.signature_,
                     [[type(BitVec(1)), 1], [type(BitVec(1)), 1]])
    for lhs in range(2):
        for rhs in range(2):
            expected = BitVec(1, lhs) or BitVec(1, rhs)
            self.assertEqual(
                examples.bool_or(BitVec(1, lhs), BitVec(1, rhs)), expected)
def test_bit_or(self):
    """bit_or: two 1-bit arguments; matches BitVec's | operator."""
    func = BoolFunction(examples.bit_or)
    self.assertEqual(func.signature_,
                     [[type(BitVec(1)), 1], [type(BitVec(1)), 1]])
    for lhs in range(2):
        for rhs in range(2):
            expected = BitVec(1, lhs) | BitVec(1, rhs)
            self.assertEqual(
                examples.bit_or(BitVec(1, lhs), BitVec(1, rhs)), expected)
def test_bit_or_2bit(self):
    """bit_or_2bit: two 2-bit arguments; matches BitVec's | operator."""
    func = BoolFunction(examples.bit_or_2bit)
    self.assertEqual(func.signature_,
                     [[type(BitVec(2)), 2], [type(BitVec(2)), 2]])
    for lhs in range(4):
        for rhs in range(4):
            expected = BitVec(2, lhs) | BitVec(2, rhs)
            self.assertEqual(
                examples.bit_or_2bit(BitVec(2, lhs), BitVec(2, rhs)),
                expected)
def test_bit_xor(self):
    """bit_xor: two 1-bit arguments; matches BitVec's ^ operator."""
    func = BoolFunction(examples.bit_xor)
    self.assertEqual(func.signature_,
                     [[type(BitVec(1)), 1], [type(BitVec(1)), 1]])
    for lhs in range(2):
        for rhs in range(2):
            expected = BitVec(1, lhs) ^ BitVec(1, rhs)
            self.assertEqual(
                examples.bit_xor(BitVec(1, lhs), BitVec(1, rhs)), expected)
def test_bit_xor_2bit(self):
    """bit_xor_2bit: two 2-bit arguments; matches BitVec's ^ operator."""
    func = BoolFunction(examples.bit_xor_2bit)
    self.assertEqual(func.signature_,
                     [[type(BitVec(2)), 2], [type(BitVec(2)), 2]])
    for lhs in range(4):
        for rhs in range(4):
            expected = BitVec(2, lhs) ^ BitVec(2, rhs)
            self.assertEqual(
                examples.bit_xor_2bit(BitVec(2, lhs), BitVec(2, rhs)),
                expected)
class TestBoolFunctionSimulation(unittest.TestCase):
    """Run BoolFunction.simulate() on explicit inputs for every example."""

    def test_constant(self):
        self.assertEqual(BoolFunction(examples.constant).simulate(), [True])

    def test_constant_2bit(self):
        self.assertEqual(BoolFunction(examples.constant_2bit).simulate(),
                         [False, True])

    def test_id(self):
        func = BoolFunction(examples.identity)
        self.assertEqual(func.simulate(BitVec(1, '0')), [False])
        self.assertEqual(func.simulate(BitVec(1, '1')), [True])

    def test_id_2bit(self):
        func = BoolFunction(examples.identity_2bit)
        for value in range(4):
            vec = BitVec(2, value)
            self.assertEqual(func.simulate(vec),
                             [bool(vec[0]), bool(vec[1])])

    def test_bool_not(self):
        func = BoolFunction(examples.bool_not)
        self.assertEqual(func.simulate(BitVec(1, '0')), [True])
        self.assertEqual(func.simulate(BitVec(1, '1')), [False])

    def test_bit_not(self):
        func = BoolFunction(examples.bit_not)
        self.assertEqual(func.simulate(BitVec(1, '0')), [True])
        self.assertEqual(func.simulate(BitVec(1, '1')), [False])

    def test_bit_not_2bit(self):
        func = BoolFunction(examples.bit_not_2bit)
        for value in range(4):
            vec = BitVec(2, value)
            self.assertEqual(func.simulate(vec), [not vec[0], not vec[1]])

    def test_bool_and(self):
        func = BoolFunction(examples.bool_and)
        for lhs in range(2):
            for rhs in range(2):
                # Python `and` on BitVecs: the truth value of the pair.
                expected = BitVec(1, lhs) and BitVec(1, rhs)
                self.assertEqual(
                    func.simulate(BitVec(1, lhs), BitVec(1, rhs)),
                    [bool(expected[0])])

    def test_bit_and(self):
        func = BoolFunction(examples.bit_and)
        for lhs in range(2):
            for rhs in range(2):
                expected = BitVec(1, lhs) & BitVec(1, rhs)
                self.assertEqual(
                    func.simulate(BitVec(1, lhs), BitVec(1, rhs)),
                    [bool(expected[0])])

    def test_bit_and_2bit(self):
        func = BoolFunction(examples.bit_and_2bit)
        for lhs in range(4):
            for rhs in range(4):
                expected = BitVec(2, lhs) & BitVec(2, rhs)
                self.assertEqual(
                    func.simulate(BitVec(2, lhs), BitVec(2, rhs)),
                    [bool(expected[0]), bool(expected[1])])

    def test_bool_or(self):
        func = BoolFunction(examples.bool_or)
        for lhs in range(2):
            for rhs in range(2):
                expected = BitVec(1, lhs) or BitVec(1, rhs)
                self.assertEqual(
                    func.simulate(BitVec(1, lhs), BitVec(1, rhs)),
                    [bool(expected[0])])

    def test_bit_or(self):
        func = BoolFunction(examples.bit_or)
        for lhs in range(2):
            for rhs in range(2):
                expected = BitVec(1, lhs) | BitVec(1, rhs)
                self.assertEqual(
                    func.simulate(BitVec(1, lhs), BitVec(1, rhs)),
                    [bool(expected[0])])

    def test_bit_or_2bit(self):
        func = BoolFunction(examples.bit_or_2bit)
        for lhs in range(4):
            for rhs in range(4):
                expected = BitVec(2, lhs) | BitVec(2, rhs)
                self.assertEqual(
                    func.simulate(BitVec(2, lhs), BitVec(2, rhs)),
                    [bool(expected[0]), bool(expected[1])])

    def test_bit_xor(self):
        func = BoolFunction(examples.bit_xor)
        for lhs in range(2):
            for rhs in range(2):
                expected = BitVec(1, lhs) ^ BitVec(1, rhs)
                self.assertEqual(
                    func.simulate(BitVec(1, lhs), BitVec(1, rhs)),
                    [bool(expected[0])])

    def test_bit_xor_2bit(self):
        func = BoolFunction(examples.bit_xor_2bit)
        for lhs in range(4):
            for rhs in range(4):
                expected = BitVec(2, lhs) ^ BitVec(2, rhs)
                self.assertEqual(
                    func.simulate(BitVec(2, lhs), BitVec(2, rhs)),
                    [bool(expected[0]), bool(expected[1])])
# Simulate the full truth table of each example function.
class TestBoolFunctionFullSimulation(unittest.TestCase):
    """simulate_all() must return one truth-table column per output bit."""

    def _assert_table(self, function, *columns):
        # The table has exactly one entry per expected column, and each
        # entry stringifies to the expected bit pattern, in order.
        table = function.simulate_all()
        self.assertEqual(len(table), len(columns))
        for column, expected in zip(table, columns):
            self.assertEqual(str(column), expected)

    def test_id(self):
        self._assert_table(BoolFunction(examples.identity), '10')

    def test_id_str(self):
        self._assert_table(BoolFunction("x"), '10')

    def test_id_2bit(self):
        self._assert_table(BoolFunction(examples.identity_2bit),
                           '1010', '1100')

    def test_not(self):
        self._assert_table(BoolFunction(examples.bool_not), '01')

    def test_not_str(self):
        self._assert_table(BoolFunction("~x"), '01')

    def test_not_2bit(self):
        self._assert_table(BoolFunction(examples.bit_not_2bit),
                           '0101', '0011')

    def test_and(self):
        self._assert_table(BoolFunction(examples.bool_and), '1000')

    def test_and_str(self):
        self._assert_table(BoolFunction("x & b"), '1000')

    def test_and_2bit(self):
        # Expected columns computed from the canonical 4-variable masks.
        col0 = BitVec(16, 0xaaaa) & BitVec(16, 0xf0f0)
        col1 = BitVec(16, 0xcccc) & BitVec(16, 0xff00)
        self._assert_table(BoolFunction(examples.bit_and_2bit),
                           str(col0), str(col1))

    def test_or(self):
        self._assert_table(BoolFunction(examples.bool_or), '1110')

    def test_or_str(self):
        self._assert_table(BoolFunction("x | b"), '1110')

    def test_or_2bit(self):
        col0 = BitVec(16, 0xaaaa) | BitVec(16, 0xf0f0)
        col1 = BitVec(16, 0xcccc) | BitVec(16, 0xff00)
        self._assert_table(BoolFunction(examples.bit_or_2bit),
                           str(col0), str(col1))

    def test_xor_str(self):
        self._assert_table(BoolFunction("x ^ b"), '0110')

    def test_xor_2bit(self):
        col0 = BitVec(16, 0xaaaa) ^ BitVec(16, 0xf0f0)
        col1 = BitVec(16, 0xcccc) ^ BitVec(16, 0xff00)
        self._assert_table(BoolFunction(examples.bit_xor_2bit),
                           str(col0), str(col1))
| 43.950437
| 80
| 0.531144
| 1,699
| 15,075
| 4.58093
| 0.049441
| 0.154182
| 0.132597
| 0.156238
| 0.940126
| 0.895156
| 0.867275
| 0.847874
| 0.816395
| 0.781447
| 0
| 0.036776
| 0.341625
| 15,075
| 342
| 81
| 44.078947
| 0.747406
| 0.020232
| 0
| 0.788396
| 0
| 0
| 0.00508
| 0
| 0
| 0
| 0.004876
| 0
| 0.273038
| 1
| 0.146758
| false
| 0
| 0.010239
| 0
| 0.167235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1046957f063d775f5e5ee89ddc8d4f78a0582a3b
| 17,037
|
py
|
Python
|
src/makeMetre.py
|
ytyaru/Python.Audio.Chord.2017081743
|
f9bad6c9c013c216aff586bed56ea646f26d1236
|
[
"CC0-1.0"
] | 1
|
2019-11-14T07:30:23.000Z
|
2019-11-14T07:30:23.000Z
|
src/makeMetre.py
|
ytyaru/Python.Audio.Scale.201708102021
|
6f5e47c7af00ff793cce0893dff29b1e6904cb4e
|
[
"CC0-1.0"
] | null | null | null |
src/makeMetre.py
|
ytyaru/Python.Audio.Scale.201708102021
|
6f5e47c7af00ff793cce0893dff29b1e6904cb4e
|
[
"CC0-1.0"
] | null | null | null |
#!python3.6
#coding:utf-8
import time
import Wave.Player
import Wave.Sampler
import Wave.BaseWaveMaker
import Wave.WaveFile
import MusicTheory.EqualTemperament
import MusicTheory.Scale
import MusicTheory.tempo
import pathlib
def make_metre():
    """Render demonstration click-track WAVs for simple, compound and
    irregular (odd) metres under ../res/metres/.

    Each section sets a time signature on the shared TimeBase, then
    renders four bars of sine-wave beats whose amplitudes encode the
    accent pattern (Strong / Medium / weak) and writes them to a file
    named after the metre and pattern.
    """
    wm = Wave.BaseWaveMaker.BaseWaveMaker()
    sampler = Wave.Sampler.Sampler()
    # NOTE(review): `et` is never used below — confirm the constructor has
    # no required side effects before deleting it.
    et = MusicTheory.EqualTemperament.EqualTemperament()
    scale = MusicTheory.Scale.Scale()
    timebase = MusicTheory.tempo.TimeBase()
    timebase.BPM = 120
    timebase.Metre = (4, 4)
    nv = MusicTheory.tempo.NoteValue(timebase)
    wf = Wave.WaveFile.WaveFile()
    wf.BasePath = pathlib.PurePath('../res/metres/')
    p = Wave.Player.Player()
    p.Open()
    scale.Major(key='C')
    tone = scale.Frequencies[0] * 2  # one octave above the tonic

    def render(amps, note, filename, bars=4):
        # Render `bars` bars: one sine beat per amplitude in `amps`, each
        # lasting one 1/`note` note at the metre current when called
        # (nv.Get depends on timebase.Metre, so it must stay inside).
        wav = []
        for _ in range(bars):
            for amp in amps:
                wav.append(sampler.Sampling(
                    wm.Sin(a=amp, fs=8000, f0=tone, sec=nv.Get(note))))
        wf.Write(b''.join(wav), filename=filename)

    print(f'BPM={timebase.BPM} キー={scale.Key} 音階={scale.Scales}')

    print('========== 単純拍子 ==========')
    timebase.Metre = (2, 2)
    print(f'拍子={timebase.Metre} 強弱')
    render((1, 0.4), 2, '2-2(Sw)')
    timebase.Metre = (2, 2)
    print(f'拍子={timebase.Metre} 弱強')
    render((0.4, 1), 2, '2-2(wS)')
    timebase.Metre = (2, 4)
    print(f'拍子={timebase.Metre} 強強弱弱')
    render((1, 0.4), 2, '2-4(Sw)')
    timebase.Metre = (2, 4)
    print(f'拍子={timebase.Metre} 弱弱強強')
    render((0.4, 1), 2, '2-4(wS)')
    timebase.Metre = (3, 4)
    print(f'拍子={timebase.Metre}')
    # In 4/4 this pattern would have to be written as triplets; declaring
    # 3/4 keeps both the notation and the note values simple.
    render((1, 0.4, 0.4), 4, '3-4(Sww)')
    timebase.Metre = (3, 2)
    print(f'拍子={timebase.Metre}')
    render((1, 0.4, 0.4), 2, '3-2(Sww)')
    timebase.Metre = (3, 8)
    print(f'拍子={timebase.Metre}')
    render((1, 0.4, 0.4), 8, '3-8(Sww)')
    timebase.Metre = (4, 4)
    print(f'拍子={timebase.Metre} 強弱中弱')
    render((1, 0.4, 0.7, 0.4), 4, '4-4(SwMw)')
    timebase.Metre = (4, 4)
    print(f'拍子={timebase.Metre} 強弱弱弱')
    render((1, 0.4, 0.4, 0.4), 4, '4-4(Swww)')

    print('========== 複合拍子 ==========')
    # 6/8: two beats, each subdivided into three.
    print('拍子=6/8')
    timebase.Metre = (6, 8)
    render((1, 0.3, 0.3, 0.7, 0.3, 0.3), 8, '6-8(SwwMww)')
    # 6/4: two beats, each subdivided into three.
    print('拍子=6/4')
    timebase.Metre = (6, 4)
    render((1, 0.4, 0.4, 0.7, 0.4, 0.4), 4, '6-4(SwwMww)')
    # 9/8: three beats, each subdivided into three.
    print('拍子=9/8')
    timebase.Metre = (9, 8)
    render((1, 0.4, 0.4, 0.7, 0.4, 0.4, 0.7, 0.4, 0.4), 8, '9-8(SwwMwwMww)')
    # 12/8: four beats, each subdivided into three.
    timebase.Metre = (12, 8)
    print(f'12/8拍子 強弱中弱')
    render((1, 0.3, 0.3, 0.5, 0.3, 0.3, 0.75, 0.3, 0.3, 0.5, 0.3, 0.3), 8,
           '12-8(SwwmwwMwwmww)')
    timebase.Metre = (12, 8)
    print(f'12/8拍子 強弱弱弱')
    render((1, 0.3, 0.3, 0.5, 0.3, 0.3, 0.5, 0.3, 0.3, 0.5, 0.3, 0.3), 8,
           '12-8(Swwmwwmwwmww)')

    print('========== 変拍子 ==========')
    timebase.Metre = (5, 4)
    print(f'5拍子 3拍子+2拍子 (3+2)/4 2拍子の中に3拍子と2拍子がある')
    render((1, 0.4, 0.4, 0.7, 0.4), 4, '5-4(SwwMw)')
    print(f'5拍子 2拍子+3拍子 (2+3)/4')
    render((1, 0.4, 0.7, 0.4, 0.4), 4, '5-4(SwMww)')
    print(f'純5拍子? 強弱弱弱弱 5/4')
    render((1, 0.4, 0.4, 0.4, 0.4), 4, '5-4(Swwww)')
    print(f'7拍子 7/4 強弱弱弱弱弱弱')
    # BUG FIX: was (4, 4), contradicting the 7/4 label above; the 7-beat
    # sections below also inherit this metre as intended now.
    timebase.Metre = (7, 4)
    render((1, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4), 4, '7-4(Swwwwww)')
    print(f'7拍子 (4+3)/4 2拍子の中に4,3拍子がある。')
    render((1, 0.3, 0.5, 0.3, 0.8, 0.3, 0.3), 4, '7-4(SwwwMww)')
    print(f'7拍子 (3+4)/4 2拍子の中に3,4拍子がある。')
    render((1, 0.3, 0.3, 0.8, 0.3, 0.5, 0.3), 4, '7-4(SwwMwww)')
    print(f'7拍子 (3+2+2)/4 3拍子の中に2,2,3拍子がある。')
    render((1, 0.3, 0.3, 0.8, 0.3, 0.8, 0.3), 4, '7-4(SwwMwMw)')
    print(f'7拍子 (2+2+3)/4 3拍子の中に2,2,3拍子がある。')
    render((1, 0.3, 0.8, 0.3, 0.8, 0.3, 0.3), 4, '7-4(SwMwMww)')
    p.Close()
# Script entry point: render all metre demo files when run directly.
if __name__ == "__main__":
    make_metre()
| 57.363636
| 110
| 0.634149
| 3,122
| 17,037
| 3.457399
| 0.040038
| 0.102557
| 0.182324
| 0.273485
| 0.894015
| 0.894015
| 0.891884
| 0.88929
| 0.884288
| 0.864276
| 0
| 0.095969
| 0.139461
| 17,037
| 296
| 111
| 57.557432
| 0.64027
| 0.028937
| 0
| 0.692913
| 0
| 0
| 0.050212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003937
| false
| 0
| 0.035433
| 0
| 0.03937
| 0.102362
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
52ceed1ffc9ac18c1b29e4ad68689974aa310190
| 6,796
|
py
|
Python
|
tests/unit/extutils/imgproc.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 5
|
2020-08-26T20:12:00.000Z
|
2020-12-11T16:39:22.000Z
|
tests/unit/extutils/imgproc.py
|
RaenonX/Jelly-Bot
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 234
|
2019-12-14T03:45:19.000Z
|
2020-08-26T18:55:19.000Z
|
tests/unit/extutils/imgproc.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 2
|
2019-10-23T15:21:15.000Z
|
2020-05-22T09:35:55.000Z
|
import os
from tempfile import TemporaryDirectory
from zipfile import ZipFile, is_zipfile
from extutils.imgproc.apng2gif import convert, ConvertResult, ConvertOpResult
from tests.base import TestCase
__all__ = ["TestApng2Gif", "TestApng2GifConvertResult", "TestApng2GifConvertOpResult"]
class TestApng2Gif(TestCase):
    """End-to-end tests for apng2gif.convert()."""

    def test_convert(self):
        """Convert without zipping frames: GIF produced, no zip written."""
        with TemporaryDirectory() as tmp_dir:
            gif_path = os.path.join(tmp_dir, "out.gif")
            frames_zip = os.path.join(tmp_dir, "out-frames.zip")
            with open("tests/res/linesticker.apng", "rb") as apng:
                res = convert(apng.read(), gif_path, zip_frames=False)
            # Extraction, collation and merging ran; zipping was skipped.
            self.assertTrue(res.frame_extraction.success)
            self.assertGreaterEqual(res.frame_extraction.duration, 0)
            self.assertFalse(res.frame_zipping.success)
            self.assertEqual(res.frame_zipping.duration, 0)
            self.assertIsNone(res.frame_zipping.exception)
            self.assertTrue(res.image_data_collation.success)
            self.assertGreaterEqual(res.image_data_collation.duration, 0)
            self.assertTrue(res.gif_merging.success)
            self.assertGreaterEqual(res.gif_merging.duration, 0)
            self.assertTrue(res.succeed)
            self.assertTrue(os.path.exists(gif_path))
            self.assertFalse(os.path.exists(frames_zip))
            with open(gif_path, "rb") as gif:
                # A valid GIF starts with one of the two magic signatures.
                self.assertTrue(gif.read(6) in (b"GIF87a", b"GIF89a"))

    def test_convert_zip_frames(self):
        """Default conversion: GIF produced and frames zipped alongside."""
        with TemporaryDirectory() as tmp_dir:
            gif_path = os.path.join(tmp_dir, "out.gif")
            frames_zip = os.path.join(tmp_dir, "out-frames.zip")
            with open("tests/res/linesticker.apng", "rb") as apng:
                res = convert(apng.read(), gif_path)
            # Every stage, including zipping, must report success.
            self.assertTrue(res.frame_extraction.success)
            self.assertGreaterEqual(res.frame_extraction.duration, 0)
            self.assertIsNone(res.frame_zipping.exception)
            self.assertTrue(res.frame_zipping.success)
            self.assertGreaterEqual(res.frame_zipping.duration, 0)
            self.assertTrue(res.image_data_collation.success)
            self.assertGreaterEqual(res.image_data_collation.duration, 0)
            self.assertTrue(res.gif_merging.success)
            self.assertGreaterEqual(res.gif_merging.duration, 0)
            self.assertTrue(res.succeed)
            self.assertTrue(os.path.exists(gif_path), gif_path)
            self.assertTrue(os.path.exists(frames_zip), frames_zip)
            with open(gif_path, "rb") as gif:
                self.assertTrue(gif.read(6) in (b"GIF87a", b"GIF89a"))
            self.assertTrue(is_zipfile(frames_zip))
            self.assertGreaterEqual(len(ZipFile(frames_zip).namelist()), 0)
class TestApng2GifConvertResult(TestCase):
    """ConvertResult.succeed needs extraction, collation and merging."""

    def test_succeed(self):
        # Marking stages one by one (zipping included): succeed stays
        # False until gif_merging, the last stage, is marked.
        res = ConvertResult()
        for stage in (res.frame_extraction, res.frame_zipping,
                      res.image_data_collation):
            self.assertFalse(res.succeed)
            stage.set_success(0.0)
        self.assertFalse(res.succeed)
        res.gif_merging.set_success(0.0)
        self.assertTrue(res.succeed)

    def _check_succeed_without_zipping(self, duration):
        # Drive extraction -> collation -> merging with `duration`,
        # leaving frame_zipping untouched; succeed must flip only at the
        # end, and zipping must still report failure.
        res = ConvertResult()
        for stage in (res.frame_extraction, res.image_data_collation):
            self.assertFalse(res.succeed)
            stage.set_success(duration)
        self.assertFalse(res.succeed)
        res.gif_merging.set_success(duration)
        self.assertTrue(res.succeed)
        self.assertFalse(res.frame_zipping.success)

    def test_succeed_no_zip_frames(self):
        self._check_succeed_without_zipping(0.0)

    def test_succeed_set_success(self):
        self._check_succeed_without_zipping(0.1)
class TestApng2GifConvertOpResult(TestCase):
    """State-machine tests for ConvertOpResult."""

    def _assert_pristine(self, res):
        # A fresh result: not successful, zero duration, no exception.
        self.assertFalse(res.success)
        self.assertEqual(res.duration, 0)
        self.assertIsNone(res.exception)

    def test_set_success(self):
        res = ConvertOpResult()
        self._assert_pristine(res)
        res.set_success(0.7)
        self.assertTrue(res.success)
        self.assertEqual(res.duration, 0.7)
        self.assertIsNone(res.exception)

    def test_set_failed(self):
        res = ConvertOpResult()
        self._assert_pristine(res)
        res.set_failure(ValueError())
        self.assertFalse(res.success)
        self.assertEqual(res.duration, 0.0)
        self.assertIsInstance(res.exception, ValueError)

    def test_set_failed_no_exception(self):
        res = ConvertOpResult()
        self._assert_pristine(res)
        res.set_failure()
        self.assertFalse(res.success)
        self.assertEqual(res.duration, 0.0)
        self.assertIsNone(res.exception)

    def test_set_twice(self):
        # Once failed, a result is frozen: further transitions raise and
        # the recorded failure state is unchanged.
        res = ConvertOpResult()
        self._assert_pristine(res)
        res.set_failure(ValueError())
        self.assertFalse(res.success)
        self.assertEqual(res.duration, 0.0)
        self.assertIsInstance(res.exception, ValueError)
        with self.assertRaises(ValueError):
            res.set_success(0.7)
        with self.assertRaises(ValueError):
            res.set_failure()
        self.assertFalse(res.success)
        self.assertEqual(res.duration, 0.0)
        self.assertIsInstance(res.exception, ValueError)

    def test_set_twice_2(self):
        # Once successful, a result is frozen the same way.
        res = ConvertOpResult()
        self._assert_pristine(res)
        res.set_success(0.7)
        self.assertTrue(res.success)
        self.assertEqual(res.duration, 0.7)
        self.assertIsNone(res.exception)
        with self.assertRaises(ValueError):
            res.set_success(0.7)
        with self.assertRaises(ValueError):
            res.set_failure()
        self.assertTrue(res.success)
        self.assertEqual(res.duration, 0.7)
        self.assertIsNone(res.exception)
| 34.851282
| 86
| 0.676133
| 761
| 6,796
| 5.884363
| 0.103811
| 0.026798
| 0.103171
| 0.081286
| 0.857749
| 0.847477
| 0.824252
| 0.806387
| 0.806387
| 0.80594
| 0
| 0.014026
| 0.223661
| 6,796
| 194
| 87
| 35.030928
| 0.834723
| 0
| 0
| 0.758865
| 0
| 0
| 0.027958
| 0.015303
| 0
| 0
| 0
| 0
| 0.588652
| 1
| 0.070922
| false
| 0
| 0.035461
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5e11fc1312f940ad6ecbf7b8b7ab6c04a4a7d52f
| 121
|
py
|
Python
|
app/endpoints/__init__.py
|
s-andrew/FlaskProductRest
|
167e44e7c379f50cf83502a5fc423cb6ef92132a
|
[
"BSD-2-Clause"
] | null | null | null |
app/endpoints/__init__.py
|
s-andrew/FlaskProductRest
|
167e44e7c379f50cf83502a5fc423cb6ef92132a
|
[
"BSD-2-Clause"
] | null | null | null |
app/endpoints/__init__.py
|
s-andrew/FlaskProductRest
|
167e44e7c379f50cf83502a5fc423cb6ef92132a
|
[
"BSD-2-Clause"
] | null | null | null |
from .products import products_blueprint
def register_blueprints(app):
    """Attach every endpoint blueprint to the given Flask application.

    Currently only the products blueprint is registered.
    """
    app.register_blueprint(products_blueprint)
| 30.25
| 46
| 0.826446
| 14
| 121
| 6.857143
| 0.571429
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 121
| 4
| 46
| 30.25
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
5e27c8628f03af7a382487274299b91d0f9df20e
| 17,559
|
py
|
Python
|
thrift_ps/server/ps_server.py
|
DS3Lab/LambdaML
|
0afca7819e08632ba116fec8e102084e4040a47a
|
[
"Apache-2.0"
] | 23
|
2021-05-17T09:24:24.000Z
|
2022-01-29T18:40:44.000Z
|
thrift_ps/server/ps_server.py
|
DS3Lab/LambdaML
|
0afca7819e08632ba116fec8e102084e4040a47a
|
[
"Apache-2.0"
] | 2
|
2021-05-17T16:15:12.000Z
|
2021-07-20T09:11:22.000Z
|
thrift_ps/server/ps_server.py
|
DS3Lab/LambdaML
|
0afca7819e08632ba116fec8e102084e4040a47a
|
[
"Apache-2.0"
] | 3
|
2021-05-17T09:31:53.000Z
|
2021-12-02T16:29:59.000Z
|
import sys
import threading
import time
import numpy as np
from thrift_ps.ps_service import ParameterServer
from thrift_ps.ps_service.ttypes import Model, Grad, Update, Operation, InvalidOperation
class PSHandler:
    """In-memory parameter server handler: applies each pushed gradient/update
    directly to the stored weight vector under a lock.

    Bookkeeping dicts are all keyed by model id (mid).
    """

    def __init__(self):
        self.models = {}             # mid -> np.ndarray of weights
        self.model_ts = {}           # mid -> registration timestamp
        self.model_parallelism = {}  # mid -> number of cooperating workers
        self.model_pull_count = {}   # mid -> total pulls served
        self.model_push_count = {}   # mid -> total pushes applied
        self.w_lock = threading.Lock()

    def model_ids(self):
        """Return a view of all registered model ids."""
        return self.model_ts.keys()

    def delete_expired(self, lower):
        """Delete every model registered at or before timestamp *lower*."""
        print("current model on PS >>> {}".format(self.models.keys()))
        print("current model parallelism on PS >>> {}".format(self.model_parallelism))
        print("current model pull count on PS >>> {}".format(self.model_pull_count))
        print("current model push count on PS >>> {}".format(self.model_push_count))
        # Snapshot the items first: delete() mutates model_ts, and mutating a
        # dict while iterating over it raises RuntimeError.
        for mid, ts in list(self.model_ts.items()):
            if ts <= lower:
                self.delete(mid)

    def delete(self, mid):
        """Drop all bookkeeping for model *mid* (KeyError if unknown)."""
        print("delete model {}".format(mid))
        self.models.pop(mid)
        self.model_ts.pop(mid)
        self.model_parallelism.pop(mid)
        self.model_pull_count.pop(mid)
        self.model_push_count.pop(mid)

    def ping(self):
        """Liveness probe."""
        print('ping()')

    def register_model(self, mid, length, parallelism):
        """Create model *mid* with *length* weights drawn uniformly from [-0.1, 0.1)."""
        self.models[mid] = 0.2 * np.random.rand(length) - 0.1
        self.model_ts[mid] = time.time()
        self.model_parallelism[mid] = parallelism
        self.model_pull_count[mid] = 0
        self.model_push_count[mid] = 0
        print("register model >>> id = {}, length = {}, parallelism = {}"
              .format(mid, length, parallelism))

    def exist_model(self, mid):
        """Return True if model *mid* is registered."""
        return mid in self.models

    def can_pull(self, mid, n_iter, worker_id):
        """Workers may pull for iteration *n_iter* once all pushes of the
        previous iteration have been applied."""
        if mid in self.model_push_count:
            return self.model_push_count[mid] == self.model_parallelism[mid] * n_iter
        x = InvalidOperation()
        x.whatOp = Operation.CAN_PULL
        x.why = 'No model {} in model_push_count on PS'.format(mid)
        raise x

    def can_push(self, mid, n_iter, worker_id):
        """Workers may push for iteration *n_iter* once every worker has
        pulled that iteration's weights."""
        if mid in self.model_pull_count:
            return self.model_pull_count[mid] == self.model_parallelism[mid] * (n_iter + 1)
        x = InvalidOperation()
        x.whatOp = Operation.CAN_PUSH
        x.why = 'No model {} in model_pull_count on PS'.format(mid)
        raise x

    def pull_model(self, mid, n_iter, worker_id):
        """Return a thrift Model struct holding the current weights of *mid*.

        Raises InvalidOperation if the model is unknown.
        """
        print("worker {} pull model >>> id = {}, iter = {}".format(worker_id, mid, n_iter))
        if mid not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PULL_MODEL
            x.why = 'No model {} on PS'.format(mid)
            raise x
        model = Model()
        model.id = mid
        model.data = self.models[mid].tolist()
        model.length = len(model.data)
        self.model_pull_count[mid] += 1
        return model

    def push_grad(self, grad):
        """Apply grad.data scaled by grad.learning_rate to the stored weights."""
        print("worker {} push grad >>> id = {}, lr = {}, n_iter = {}"
              .format(grad.worker_id, grad.id, grad.learning_rate, grad.n_iter))
        update_start = time.time()
        if grad.id not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PUSH_GRAD
            x.why = 'No model {} on PS'.format(grad.id)
            raise x
        # `with` guarantees the lock is released even if the update raises,
        # unlike the bare acquire()/release() pair it replaces.
        with self.w_lock:
            self.models[grad.id] = np.add(self.models.get(grad.id),
                                          np.multiply(grad.data, grad.learning_rate))
            self.model_push_count[grad.id] += 1
        print("update model cost {} s".format(time.time() - update_start))

    def push_update(self, update):
        """Add update.data verbatim to the stored weights (no learning rate)."""
        print("worker {} push update >>> id = {}, n_iter = {}"
              .format(update.worker_id, update.id, update.n_iter))
        update_start = time.time()
        if update.id not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PUSH_UPDATE
            x.why = 'No model {} on PS'.format(update.id)
            raise x
        with self.w_lock:
            self.models[update.id] = np.add(self.models.get(update.id), update.data)
            self.model_push_count[update.id] += 1
        print("update model cost {} s".format(time.time() - update_start))
# store grad to file, and merge them
class PSHandler2:
    """Parameter server handler that buffers each worker's gradient/update as
    an .npy file in *tmp_dir* and merges them once all workers have pushed.

    Per-model file counters live in the `num_file` list, indexed through
    `model_ind` (mid -> slot in the list).
    """

    def __init__(self, tmp_dir):
        self.models = {}             # mid -> np.ndarray of weights
        self.model_ts = {}           # mid -> registration timestamp
        self.model_ind = {}          # locate model in an array
        self.num_model = 0
        self.model_parallelism = {}  # mid -> number of cooperating workers
        self.model_pull_count = {}   # mid -> total pulls served
        self.model_push_count = {}   # mid -> total pushes applied
        self.tmp_dir = tmp_dir       # where per-worker .npy files are staged
        self.num_file = []           # per-model count of staged files
        self.w_lock = threading.Lock()

    def model_ids(self):
        """Return a view of all registered model ids."""
        return self.model_ts.keys()

    def delete_expired(self, lower):
        """Delete every model registered at or before timestamp *lower*."""
        print("current model on PS >>> {}".format(self.models.keys()))
        print("current model parallelism on PS >>> {}".format(self.model_parallelism))
        print("current model pull count on PS >>> {}".format(self.model_pull_count))
        print("current model push count on PS >>> {}".format(self.model_push_count))
        # Snapshot the items first: delete() mutates model_ts, and mutating a
        # dict while iterating over it raises RuntimeError.
        for mid, ts in list(self.model_ts.items()):
            if ts <= lower:
                self.delete(mid)

    def delete(self, mid):
        """Drop all bookkeeping for model *mid* (KeyError if unknown)."""
        print("delete model {}".format(mid))
        self.models.pop(mid)
        self.model_ts.pop(mid)
        self.model_ind.pop(mid)
        self.model_parallelism.pop(mid)
        self.model_pull_count.pop(mid)
        self.model_push_count.pop(mid)

    def ping(self):
        """Liveness probe."""
        print('ping()')

    def register_model(self, mid, length, parallelism):
        """Create model *mid* with *length* weights drawn uniformly from [0, 1).

        NOTE(review): unlike PSHandler this does not center the weights
        around zero — confirm whether that asymmetry is intended.
        """
        self.models[mid] = np.random.rand(length)
        self.model_ts[mid] = time.time()
        self.model_ind[mid] = self.num_model
        self.num_model += 1
        self.num_file.append(0)
        self.model_parallelism[mid] = parallelism
        self.model_pull_count[mid] = 0
        self.model_push_count[mid] = 0
        print("register model >>> id = {}, length = {}, parallelism = {}"
              .format(mid, length, parallelism))

    def exist_model(self, mid):
        """Return True if model *mid* is registered."""
        return mid in self.models

    def inc_num_file(self, mid):
        """Record that one more worker file was staged for *mid*."""
        self.num_file[self.model_ind[mid]] += 1

    def can_merge(self, mid):
        """True once every worker has staged its file for *mid*."""
        return self.num_file[self.model_ind[mid]] == self.model_parallelism[mid]

    def merge_grad(self, mid, lr):
        """Load each worker's staged gradient and apply it scaled by *lr*."""
        parallelism = self.model_parallelism[mid]
        for i in range(parallelism):
            tmp_name = "{}/{}_{}_{}.npy".format(self.tmp_dir, mid, i, parallelism)
            tmp_arr = np.load(tmp_name)
            self.models[mid] = np.add(self.models.get(mid), np.multiply(tmp_arr, lr))
        self.reset_num_file(mid)

    def merge_update(self, mid):
        """Load each worker's staged update and add it verbatim."""
        parallelism = self.model_parallelism[mid]
        for i in range(parallelism):
            tmp_name = "{}/{}_{}_{}.npy".format(self.tmp_dir, mid, i, parallelism)
            tmp_arr = np.load(tmp_name)
            self.models[mid] = np.add(self.models.get(mid), tmp_arr)
        self.reset_num_file(mid)

    def reset_num_file(self, mid):
        """Reset the staged-file counter for *mid* after a merge."""
        self.num_file[self.model_ind[mid]] = 0

    def can_pull(self, mid, n_iter, worker_id):
        """Workers may pull for iteration *n_iter* once all pushes of the
        previous iteration have been applied."""
        if mid in self.model_push_count:
            return self.model_push_count[mid] == self.model_parallelism[mid] * n_iter
        x = InvalidOperation()
        x.whatOp = Operation.CAN_PULL
        x.why = 'No model {} in model_push_count on PS'.format(mid)
        raise x

    def can_push(self, mid, n_iter, worker_id):
        """Workers may push for iteration *n_iter* once every worker has
        pulled that iteration's weights."""
        if mid in self.model_pull_count:
            return self.model_pull_count[mid] == self.model_parallelism[mid] * (n_iter + 1)
        x = InvalidOperation()
        x.whatOp = Operation.CAN_PUSH
        x.why = 'No model {} in model_pull_count on PS'.format(mid)
        raise x

    def pull_model(self, mid, n_iter, worker_id):
        """Return a thrift Model struct holding the current weights of *mid*."""
        print("worker {} pull model >>> id = {}, iter = {}".format(worker_id, mid, n_iter))
        if mid not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PULL_MODEL
            x.why = 'No model {} on PS'.format(mid)
            raise x
        model = Model()
        model.id = mid
        model.data = self.models[mid].tolist()
        model.length = len(model.data)
        self.model_pull_count[mid] += 1
        return model

    def push_grad(self, grad):
        """Stage grad.data to disk; merge all staged gradients once every
        worker has pushed."""
        print("worker {} push grad >>> id = {}, lr = {}, n_iter = {}"
              .format(grad.worker_id, grad.id, grad.learning_rate, grad.n_iter))
        if grad.id not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PUSH_GRAD
            x.why = 'No model {} on PS'.format(grad.id)
            raise x
        save_start = time.time()
        f_name = "{}/{}_{}_{}.npy".format(self.tmp_dir, grad.id, grad.worker_id, self.model_parallelism[grad.id])
        np.save(f_name, np.array(grad.data))
        print("save file {}, cost {} s".format(f_name, time.time() - save_start))
        self.inc_num_file(grad.id)
        if self.can_merge(grad.id):
            merge_start = time.time()
            self.merge_grad(grad.id, grad.learning_rate)
            print("merge cost {} s".format(time.time() - merge_start))
        # `with` guarantees the lock is released even on error.
        with self.w_lock:
            self.model_push_count[grad.id] += 1

    def push_update(self, update):
        """Stage update.data to disk; merge all staged updates once every
        worker has pushed."""
        print("worker {} push update >>> id = {}, n_iter = {}"
              .format(update.worker_id, update.id, update.n_iter))
        if update.id not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PUSH_UPDATE
            x.why = 'No model {} on PS'.format(update.id)
            raise x
        save_start = time.time()
        f_name = "{}/{}_{}_{}.npy".format(self.tmp_dir, update.id, update.worker_id, self.model_parallelism[update.id])
        np.save(f_name, np.array(update.data))
        print("save file {}, cost {} s".format(f_name, time.time() - save_start))
        self.inc_num_file(update.id)
        if self.can_merge(update.id):
            merge_start = time.time()
            self.merge_update(update.id)
            print("merge cost {} s".format(time.time() - merge_start))
        with self.w_lock:
            self.model_push_count[update.id] += 1
# store grad in memory
class PSHandler3:
    """Parameter server handler that buffers each worker's gradient/update in
    the in-memory `tmp_data` list and merges them once all workers have pushed.

    Per-model push counters live in the `num_data` list, indexed through
    `model_ind` (mid -> slot in the list).
    """

    def __init__(self, tmp_dir):
        self.models = {}             # mid -> np.ndarray of weights
        self.model_ts = {}           # mid -> registration timestamp
        self.model_ind = {}          # locate model in an array
        self.num_model = 0
        self.model_parallelism = {}  # mid -> number of cooperating workers
        self.model_pull_count = {}   # mid -> total pulls served
        self.model_push_count = {}   # mid -> total pushes applied
        self.tmp_data = []           # staged per-worker arrays awaiting merge
        self.num_data = []           # per-model count of staged arrays
        self.w_lock = threading.Lock()

    def model_ids(self):
        """Return a view of all registered model ids."""
        return self.model_ts.keys()

    def delete_expired(self, lower):
        """Delete every model registered at or before timestamp *lower*."""
        print("current model on PS >>> {}".format(self.models.keys()))
        print("current model parallelism on PS >>> {}".format(self.model_parallelism))
        print("current model pull count on PS >>> {}".format(self.model_pull_count))
        print("current model push count on PS >>> {}".format(self.model_push_count))
        # Snapshot the items first: delete() mutates model_ts, and mutating a
        # dict while iterating over it raises RuntimeError.
        for mid, ts in list(self.model_ts.items()):
            if ts <= lower:
                self.delete(mid)

    def delete(self, mid):
        """Drop all bookkeeping for model *mid* (KeyError if unknown)."""
        print("delete model {}".format(mid))
        self.models.pop(mid)
        self.model_ts.pop(mid)
        self.model_ind.pop(mid)
        self.model_parallelism.pop(mid)
        self.model_pull_count.pop(mid)
        self.model_push_count.pop(mid)

    def ping(self):
        """Liveness probe."""
        print('ping()')

    def register_model(self, mid, length, parallelism):
        """Create model *mid* with *length* weights drawn uniformly from [0, 1)."""
        self.models[mid] = np.random.rand(length)
        self.model_ts[mid] = time.time()
        self.model_ind[mid] = self.num_model
        self.num_model += 1
        self.num_data.append(0)
        self.model_parallelism[mid] = parallelism
        self.model_pull_count[mid] = 0
        self.model_push_count[mid] = 0
        print("register model >>> id = {}, length = {}, parallelism = {}"
              .format(mid, length, parallelism))

    def exist_model(self, mid):
        """Return True if model *mid* is registered."""
        return mid in self.models

    def inc_num_data(self, mid):
        """Record that one more worker array was staged for *mid*."""
        self.num_data[self.model_ind[mid]] += 1

    def can_merge(self, mid):
        """True once every worker has staged its array for *mid*."""
        return self.num_data[self.model_ind[mid]] == self.model_parallelism[mid]

    def merge_grad(self, mid, lr):
        """Apply each staged gradient, scaled by *lr*, to the weights."""
        for i in range(self.model_parallelism[mid]):
            tmp_arr = self.tmp_data[i]
            self.models[mid] = np.add(self.models.get(mid), np.multiply(tmp_arr, lr))
        self.reset_num_data(mid)

    def merge_update(self, mid):
        """Add each staged update verbatim to the weights."""
        for i in range(self.model_parallelism[mid]):
            tmp_arr = self.tmp_data[i]
            self.models[mid] = np.add(self.models.get(mid), tmp_arr)
        self.reset_num_data(mid)

    def reset_num_data(self, mid):
        """Reset the staged counter for *mid* and discard the staged arrays."""
        self.num_data[self.model_ind[mid]] = 0
        self.tmp_data.clear()

    def can_pull(self, mid, n_iter, worker_id):
        """Workers may pull for iteration *n_iter* once all pushes of the
        previous iteration have been applied."""
        if mid in self.model_push_count:
            return self.model_push_count[mid] == self.model_parallelism[mid] * n_iter
        x = InvalidOperation()
        x.whatOp = Operation.CAN_PULL
        x.why = 'No model {} in model_push_count on PS'.format(mid)
        raise x

    def can_push(self, mid, n_iter, worker_id):
        """Workers may push for iteration *n_iter* once every worker has
        pulled that iteration's weights."""
        if mid in self.model_pull_count:
            return self.model_pull_count[mid] == self.model_parallelism[mid] * (n_iter + 1)
        x = InvalidOperation()
        x.whatOp = Operation.CAN_PUSH
        x.why = 'No model {} in model_pull_count on PS'.format(mid)
        raise x

    def pull_model(self, mid, n_iter, worker_id):
        """Return a thrift Model struct holding the current weights of *mid*."""
        print("worker {} pull model >>> id = {}, iter = {}".format(worker_id, mid, n_iter))
        if mid not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PULL_MODEL
            x.why = 'No model {} on PS'.format(mid)
            raise x
        model = Model()
        model.id = mid
        model.data = self.models[mid].tolist()
        model.length = len(model.data)
        self.model_pull_count[mid] += 1
        return model

    def push_grad(self, grad):
        """Stage grad.data in memory; merge once every worker has pushed."""
        print("worker {} push grad >>> id = {}, lr = {}, n_iter = {}"
              .format(grad.worker_id, grad.id, grad.learning_rate, grad.n_iter))
        if grad.id not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PUSH_GRAD
            x.why = 'No model {} on PS'.format(grad.id)
            raise x
        self.tmp_data.append(np.array(grad.data))
        self.inc_num_data(grad.id)
        if self.can_merge(grad.id):
            self.merge_grad(grad.id, grad.learning_rate)
        # `with` guarantees the lock is released even on error.
        with self.w_lock:
            self.model_push_count[grad.id] += 1

    def push_update(self, update):
        """Stage update.data in memory; merge once every worker has pushed."""
        print("worker {} push update >>> id = {}, n_iter = {}"
              .format(update.worker_id, update.id, update.n_iter))
        if update.id not in self.models:
            x = InvalidOperation()
            x.whatOp = Operation.PUSH_UPDATE
            x.why = 'No model {} on PS'.format(update.id)
            raise x
        self.tmp_data.append(np.array(update.data))
        self.inc_num_data(update.id)
        if self.can_merge(update.id):
            self.merge_update(update.id)
        with self.w_lock:
            self.model_push_count[update.id] += 1
| 40.551963
| 124
| 0.565351
| 2,269
| 17,559
| 4.152931
| 0.049802
| 0.104107
| 0.053486
| 0.057307
| 0.952881
| 0.935583
| 0.928367
| 0.904171
| 0.882521
| 0.882521
| 0
| 0.002787
| 0.305314
| 17,559
| 433
| 125
| 40.551963
| 0.769716
| 0.035082
| 0
| 0.889488
| 0
| 0
| 0.098715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123989
| false
| 0
| 0.016173
| 0.016173
| 0.19407
| 0.097035
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eadddaf17baf95afb378437003a60cb49cff68e8
| 70
|
py
|
Python
|
web/app/tokengen.py
|
lackita/online-ratings
|
14ceda5ad89c8c388e214e04c054eaadf0055db9
|
[
"MIT"
] | 18
|
2015-04-01T21:58:27.000Z
|
2020-05-24T06:46:42.000Z
|
web/app/tokengen.py
|
lackita/online-ratings
|
14ceda5ad89c8c388e214e04c054eaadf0055db9
|
[
"MIT"
] | 63
|
2015-10-08T00:40:31.000Z
|
2020-09-12T18:35:55.000Z
|
web/app/tokengen.py
|
lackita/online-ratings
|
14ceda5ad89c8c388e214e04c054eaadf0055db9
|
[
"MIT"
] | 12
|
2015-08-16T19:46:17.000Z
|
2020-09-11T23:17:06.000Z
|
from uuid import uuid4
def generate_token():
    """Generate an unpredictable token: a random UUID4 rendered as a string."""
    fresh = uuid4()
    return str(fresh)
| 14
| 23
| 0.714286
| 10
| 70
| 4.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.185714
| 70
| 4
| 24
| 17.5
| 0.824561
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
dc6a663498842962d9688f5fdb9fb9473a386601
| 1,086
|
py
|
Python
|
orchestra/models/__init__.py
|
ksbek/orchestra
|
07556717feb57efcf8fb29a1e2e98eebe2313b8c
|
[
"Apache-2.0"
] | null | null | null |
orchestra/models/__init__.py
|
ksbek/orchestra
|
07556717feb57efcf8fb29a1e2e98eebe2313b8c
|
[
"Apache-2.0"
] | null | null | null |
orchestra/models/__init__.py
|
ksbek/orchestra
|
07556717feb57efcf8fb29a1e2e98eebe2313b8c
|
[
"Apache-2.0"
] | 1
|
2021-12-15T01:10:35.000Z
|
2021-12-15T01:10:35.000Z
|
from orchestra.models.communication.models import CommunicationPreference # noqa
from orchestra.models.communication.models import StaffBotRequest # noqa
from orchestra.models.communication.models import StaffingRequestInquiry # noqa
from orchestra.models.communication.models import StaffingResponse # noqa
from orchestra.models.core.models import Workflow # noqa
from orchestra.models.core.models import WorkflowVersion # noqa
from orchestra.models.core.models import Certification # noqa
from orchestra.models.core.models import Step # noqa
from orchestra.models.core.models import Worker # noqa
from orchestra.models.core.models import WorkerCertification # noqa
from orchestra.models.core.models import Project # noqa
from orchestra.models.core.models import Task # noqa
from orchestra.models.core.models import TaskAssignment # noqa
from orchestra.models.core.models import Iteration # noqa
from orchestra.models.core.models import TimeEntry # noqa
from orchestra.models.core.models import TaskTimer # noqa
from orchestra.models.core.models import PayRate # noqa
| 60.333333
| 81
| 0.827808
| 136
| 1,086
| 6.610294
| 0.176471
| 0.245829
| 0.359288
| 0.409344
| 0.773081
| 0.773081
| 0.724138
| 0
| 0
| 0
| 0
| 0
| 0.109576
| 1,086
| 17
| 82
| 63.882353
| 0.929679
| 0.077348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
dc702703bfded935295e6ad726e5f06f2c1b4171
| 1,709
|
py
|
Python
|
test/regexp/python1.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 1,482
|
2015-10-16T21:59:32.000Z
|
2022-03-30T11:44:40.000Z
|
test/regexp/python1.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 226
|
2015-10-15T15:53:44.000Z
|
2022-03-25T03:08:27.000Z
|
test/regexp/python1.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 129
|
2015-10-20T02:41:49.000Z
|
2022-03-22T01:44:36.000Z
|
a = r'[a-z]'
a = R'[a-z]'
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.single.python
' : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.single.python
[ : constant.other.set.regexp, meta.character.set.regexp, punctuation.character.set.begin.regexp, source.python, string.regexp.quoted.single.python
a : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
- : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
z : constant.character.set.regexp, meta.character.set.regexp, source.python, string.regexp.quoted.single.python
] : constant.other.set.regexp, meta.character.set.regexp, punctuation.character.set.end.regexp, source.python, string.regexp.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.regexp.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
R : source.python, storage.type.string.python, string.quoted.raw.single.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.raw.single.python
[a-z] : source.python, string.quoted.raw.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.raw.single.python
| 65.730769
| 159
| 0.677589
| 202
| 1,709
| 5.732673
| 0.118812
| 0.207254
| 0.15544
| 0.165803
| 0.990501
| 0.983592
| 0.983592
| 0.978411
| 0.868739
| 0.811744
| 0
| 0
| 0.199532
| 1,709
| 25
| 160
| 68.36
| 0.846491
| 0
| 0
| 0.363636
| 0
| 0.318182
| 0.005851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
dc8ff809cf2bda91eeb360880f94827d6aa10afe
| 95
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_19_16_exm_2.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_19_16_exm_2.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_19_16_exm_2.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
yield p + (1,)
if p and (len(p) < 2 or p[-2] > p[-1]):
yield p[:-1] + (p[-1] + 1,)
| 23.75
| 43
| 0.357895
| 20
| 95
| 1.7
| 0.4
| 0.235294
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.336842
| 95
| 3
| 44
| 31.666667
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dcb400b56591dc069d5565f5fc4cf3aaca4a94a2
| 4,819
|
py
|
Python
|
tests/test_desktop_files.py
|
milouse/chwall
|
963045658abd41c94e29850e9f416c8970e06c32
|
[
"WTFPL"
] | 4
|
2019-11-02T12:22:48.000Z
|
2022-01-07T11:40:40.000Z
|
tests/test_desktop_files.py
|
milouse/chwall
|
963045658abd41c94e29850e9f416c8970e06c32
|
[
"WTFPL"
] | 1
|
2022-03-29T18:44:47.000Z
|
2022-03-30T07:04:54.000Z
|
tests/test_desktop_files.py
|
milouse/chwall
|
963045658abd41c94e29850e9f416c8970e06c32
|
[
"WTFPL"
] | null | null | null |
import os
from io import StringIO
import unittest
from unittest.mock import patch
from chwall.utils import ServiceFileManager
from chwall.gui.app import generate_desktop_file
from chwall.client import ChwallClient
@patch("sys.stdout", new_callable=StringIO)
class TestDesktopFiles(unittest.TestCase):
    """Compare generated desktop/systemd/XDG autostart files against the
    recorded proof files under tests/proofs/."""

    def setUp(self):
        self.maxDiff = None

    def _expected(self, proof, local=False):
        # Read the recorded proof; "local" proofs embed the current working
        # directory and exercise the not-installed code path.
        with open("tests/proofs/" + proof, "r") as handle:
            body = handle.read()
        if local:
            body = body.format(path=os.getcwd())
        os.environ["CHWALL_FAKE_INSTALL"] = "absent" if local else "exists"
        return body

    def _run_client(self, args):
        # The CLI client exits via SystemExit; swallow it like a shell would.
        try:
            ChwallClient(args)
        except SystemExit:
            pass

    def test_01_create_desktop_file(self, mock_stdout):
        expected = self._expected("app-desktop")
        generate_desktop_file("./locale", "print")
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_02_create_desktop_file_from_client(self, mock_stdout):
        expected = self._expected("app-desktop")
        self._run_client(["desktop", "print", "./locale"])
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_03_create_systemd_service_file(self, mock_stdout):
        expected = self._expected("systemd-unit")
        ServiceFileManager().systemd_service_file()
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_04_create_systemd_service_file_from_client(self, mock_stdout):
        expected = self._expected("systemd-unit")
        self._run_client(["systemd"])
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_05_create_xdg_autostart_icon_file(self, mock_stdout):
        expected = self._expected("xdg-icon")
        ServiceFileManager().xdg_autostart_file("icon", "TEST ICON", "TEST DESC")
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_06_create_xdg_autostart_daemon_file(self, mock_stdout):
        expected = self._expected("xdg-daemon")
        ServiceFileManager().xdg_autostart_file("daemon", "TEST DAEMON", "TEST DESC")
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_07_create_local_desktop_file(self, mock_stdout):
        expected = self._expected("local-app-desktop", local=True)
        generate_desktop_file("./locale", "print")
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_08_create_local_desktop_file_from_client(self, mock_stdout):
        expected = self._expected("local-app-desktop", local=True)
        self._run_client(["desktop", "print", "./locale"])
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_09_create_local_systemd_service_file(self, mock_stdout):
        expected = self._expected("local-systemd-unit", local=True)
        ServiceFileManager().systemd_service_file()
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_10_create_local_systemd_service_file_from_client(self, mock_stdout):
        expected = self._expected("local-systemd-unit", local=True)
        self._run_client(["systemd"])
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_11_create_local_xdg_autostart_icon_file(self, mock_stdout):
        expected = self._expected("local-xdg-icon", local=True)
        ServiceFileManager().xdg_autostart_file("icon", "TEST ICON", "TEST DESC")
        self.assertEqual(mock_stdout.getvalue(), expected)

    def test_12_create_local_xdg_autostart_daemon_file(self, mock_stdout):
        expected = self._expected("local-xdg-daemon", local=True)
        ServiceFileManager().xdg_autostart_file("daemon", "TEST DAEMON", "TEST DESC")
        self.assertEqual(mock_stdout.getvalue(), expected)
| 41.188034
| 81
| 0.65055
| 593
| 4,819
| 5.048904
| 0.133221
| 0.08016
| 0.056112
| 0.072144
| 0.869405
| 0.862057
| 0.862057
| 0.862057
| 0.855377
| 0.846025
| 0
| 0.006405
| 0.222453
| 4,819
| 116
| 82
| 41.543103
| 0.792634
| 0
| 0
| 0.732673
| 1
| 0
| 0.169745
| 0.066404
| 0
| 0
| 0
| 0
| 0.118812
| 1
| 0.128713
| false
| 0.039604
| 0.069307
| 0
| 0.207921
| 0.039604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f4e44a8eab90de2207593381cbb5fbdd29c75770
| 3,465
|
py
|
Python
|
lib/innvestigate/tests/analyzer/test_wrapper.py
|
vwesselkamp/deepfake-fingerprint-atacks
|
0befc913b081913255399d4264f09bce0d39cbcb
|
[
"MIT"
] | null | null | null |
lib/innvestigate/tests/analyzer/test_wrapper.py
|
vwesselkamp/deepfake-fingerprint-atacks
|
0befc913b081913255399d4264f09bce0d39cbcb
|
[
"MIT"
] | null | null | null |
lib/innvestigate/tests/analyzer/test_wrapper.py
|
vwesselkamp/deepfake-fingerprint-atacks
|
0befc913b081913255399d4264f09bce0d39cbcb
|
[
"MIT"
] | null | null | null |
# Get Python six functionality:
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from innvestigate.analyzer import (
AugmentReduceBase,
GaussianSmoother,
Gradient,
PathIntegrator,
WrapperBase,
)
from tests import dryrun
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__WrapperBase():
    """Dry-run a WrapperBase-wrapped Gradient analyzer on tiny networks."""

    def build(model):
        return WrapperBase(Gradient(model))

    dryrun.test_analyzer(build, "trivia.*:mnist.log_reg")
@pytest.mark.precommit
def test_precommit__WrapperBase():
    """Dry-run a WrapperBase-wrapped Gradient analyzer on all MNIST networks."""

    def build(model):
        return WrapperBase(Gradient(model))

    dryrun.test_analyzer(build, "mnist.*")
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__SerializeWrapperBase():
    """Round-trip serialize a WrapperBase-wrapped Gradient analyzer."""

    def build(model):
        return WrapperBase(Gradient(model))

    dryrun.test_serialize_analyzer(build, "trivia.*:mnist.log_reg")
###############################################################################
###############################################################################
###############################################################################
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__AugmentReduceBase():
    """Dry-run an AugmentReduceBase-wrapped Gradient analyzer on tiny networks."""

    def build(model):
        return AugmentReduceBase(Gradient(model))

    dryrun.test_analyzer(build, "trivia.*:mnist.log_reg")
@pytest.mark.precommit
def test_precommit__AugmentReduceBase():
    """Dry-run an AugmentReduceBase-wrapped Gradient analyzer on all MNIST networks."""

    def build(model):
        return AugmentReduceBase(Gradient(model))

    dryrun.test_analyzer(build, "mnist.*")
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__SerializeAugmentReduceBase():
    """Round-trip serialize an AugmentReduceBase-wrapped Gradient analyzer."""

    def build(model):
        return AugmentReduceBase(Gradient(model))

    dryrun.test_serialize_analyzer(build, "trivia.*:mnist.log_reg")
###############################################################################
###############################################################################
###############################################################################
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__GaussianSmoother():
    """Dry-run a GaussianSmoother-wrapped Gradient analyzer on tiny networks."""

    def build(model):
        return GaussianSmoother(Gradient(model))

    dryrun.test_analyzer(build, "trivia.*:mnist.log_reg")
@pytest.mark.precommit
def test_precommit__GaussianSmoother():
    """Dry-run a GaussianSmoother-wrapped Gradient analyzer on all MNIST networks."""

    def build(model):
        return GaussianSmoother(Gradient(model))

    dryrun.test_analyzer(build, "mnist.*")
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__SerializeGaussianSmoother():
    """Round-trip serialize a GaussianSmoother-wrapped Gradient analyzer."""

    def build(model):
        return GaussianSmoother(Gradient(model))

    dryrun.test_serialize_analyzer(build, "trivia.*:mnist.log_reg")
###############################################################################
###############################################################################
###############################################################################
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__PathIntegrator():
    """Dry-run a PathIntegrator-wrapped Gradient analyzer on tiny networks."""

    def build(model):
        return PathIntegrator(Gradient(model))

    dryrun.test_analyzer(build, "trivia.*:mnist.log_reg")
@pytest.mark.precommit
def test_precommit__PathIntegrator():
    """Dry-run a PathIntegrator-wrapped Gradient analyzer on all MNIST networks."""

    def build(model):
        return PathIntegrator(Gradient(model))

    dryrun.test_analyzer(build, "mnist.*")
@pytest.mark.fast
@pytest.mark.precommit
def test_fast__SerializePathIntegrator():
    """Check that a PathIntegrator analyzer round-trips through serialization."""
    def build_analyzer(model):
        base = Gradient(model)
        return PathIntegrator(base)

    dryrun.test_serialize_analyzer(build_analyzer, "trivia.*:mnist.log_reg")
| 25.858209
| 82
| 0.588456
| 309
| 3,465
| 6.381877
| 0.12945
| 0.10142
| 0.115619
| 0.133874
| 0.840771
| 0.840771
| 0.840771
| 0.840771
| 0.840771
| 0.748479
| 0
| 0
| 0.118326
| 3,465
| 133
| 83
| 26.052632
| 0.645499
| 0.008369
| 0
| 0.717949
| 0
| 0
| 0.074917
| 0.064635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.051282
| 0.153846
| 0.512821
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
f4eb9701828b6a1544c0230f64d59a62aa7c70de
| 41
|
py
|
Python
|
scripts/test_mjp.py
|
richardrl/rlkit
|
088dae169a8d5ba1430094eee66f27b2cb7c4998
|
[
"MIT"
] | null | null | null |
scripts/test_mjp.py
|
richardrl/rlkit
|
088dae169a8d5ba1430094eee66f27b2cb7c4998
|
[
"MIT"
] | null | null | null |
scripts/test_mjp.py
|
richardrl/rlkit
|
088dae169a8d5ba1430094eee66f27b2cb7c4998
|
[
"MIT"
] | null | null | null |
# Smoke test: verify that mujoco_py can be imported at all (it compiles native
# bindings on first import, which is where installation problems surface).
print("test mjp import")
import mujoco_py
| 20.5
| 24
| 0.804878
| 7
| 41
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 2
| 25
| 20.5
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
7625ab8defe8c822a563513ac4270c5a4d7f9845
| 2,286
|
py
|
Python
|
data/operations.py
|
indiradutta/Weather-Prediction-Analysis
|
463180c608ad4a6d91a452c30db481b769281e44
|
[
"MIT"
] | 1
|
2021-11-19T18:31:36.000Z
|
2021-11-19T18:31:36.000Z
|
data/operations.py
|
indiradutta/Weather-Prediction-Analysis
|
463180c608ad4a6d91a452c30db481b769281e44
|
[
"MIT"
] | null | null | null |
data/operations.py
|
indiradutta/Weather-Prediction-Analysis
|
463180c608ad4a6d91a452c30db481b769281e44
|
[
"MIT"
] | null | null | null |
import pandas as pd
def unique(dataframe, column, new_column):
    """Return a one-column DataFrame holding the unique values of a column.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Source frame to read from.
    column : str
        Name of the column whose unique values should be extracted.
    new_column : str
        Name of the column containing the unique values in the returned frame.

    Returns
    -------
    pd.DataFrame
        A single column named ``new_column`` with the unique values of
        ``dataframe[column]`` in first-appearance order.
    """
    # Series.unique preserves first-appearance order (unlike np.unique).
    unique_values = dataframe[column].unique().tolist()
    return pd.DataFrame(unique_values, columns=[new_column])
def mode(dataframe, column):
    """Group ``dataframe`` by ``column`` and reduce every other column to its mode.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Source frame to aggregate.
    column : str
        Name of the column of the dataframe to group by.

    Returns
    -------
    pd.DataFrame
        One row per group (indexed by the group key); each remaining column
        holds the most frequent value within that group.  Ties are broken by
        ``value_counts`` ordering (highest count first, then its index order).
    """
    # value_counts() sorts by descending frequency, so index[0] is the mode.
    new_dataframe = dataframe.groupby([column]).agg(lambda x: x.value_counts().index[0])
    return new_dataframe
def max(dataframe, column, req_column, new_column):
    """Return per-group maxima of one column as a new single-column DataFrame.

    NOTE: this function shadows the builtin ``max`` at module level; the name is
    kept unchanged for backward compatibility with existing callers.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Source frame to aggregate.
    column : str
        Name of the column of the dataframe to group by.
    req_column : str
        Name of the column whose max values are required.
    new_column : str
        Name of the column containing the max values in the returned frame.

    Returns
    -------
    pd.DataFrame
        A single column named ``new_column``; rows follow the sorted order of
        the group keys (pandas groupby default).
    """
    grouped_max = dataframe.groupby([column])[req_column].max().tolist()
    return pd.DataFrame(grouped_max, columns=[new_column])
def min(dataframe, column, req_column, new_column):
    """Return per-group minima of one column as a new single-column DataFrame.

    NOTE: this function shadows the builtin ``min`` at module level; the name is
    kept unchanged for backward compatibility with existing callers.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Source frame to aggregate.
    column : str
        Name of the column of the dataframe to group by.
    req_column : str
        Name of the column whose min values are required.
    new_column : str
        Name of the column containing the min values in the returned frame.

    Returns
    -------
    pd.DataFrame
        A single column named ``new_column``; rows follow the sorted order of
        the group keys (pandas groupby default).
    """
    grouped_min = dataframe.groupby([column])[req_column].min().tolist()
    return pd.DataFrame(grouped_min, columns=[new_column])
def mean(dataframe, column, req_column, new_column):
    """Return per-group means of one column as a new single-column DataFrame.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Source frame to aggregate.
    column : str
        Name of the column of the dataframe to group by.
    req_column : str
        Name of the column whose mean values are required.
    new_column : str
        Name of the column containing the mean values in the returned frame.

    Returns
    -------
    pd.DataFrame
        A single column named ``new_column``; rows follow the sorted order of
        the group keys (pandas groupby default).
    """
    grouped_mean = dataframe.groupby([column])[req_column].mean().tolist()
    return pd.DataFrame(grouped_mean, columns=[new_column])
| 36.870968
| 111
| 0.738408
| 344
| 2,286
| 4.813953
| 0.136628
| 0.062802
| 0.101449
| 0.152174
| 0.859903
| 0.859903
| 0.836957
| 0.812802
| 0.734903
| 0.734903
| 0
| 0.000531
| 0.176728
| 2,286
| 62
| 112
| 36.870968
| 0.879384
| 0.225284
| 0
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3125
| false
| 0
| 0.0625
| 0
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
52060cbf4875a0df863a26150de785d1f6242b49
| 2,235
|
py
|
Python
|
exams/61a-su20-practice-mt/q3/q3.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | 1
|
2022-01-22T11:45:01.000Z
|
2022-01-22T11:45:01.000Z
|
exams/61a-su20-practice-mt/q3/q3.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | null | null | null |
exams/61a-su20-practice-mt/q3/q3.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | null | null | null |
def close(n, smallest=10, d=10):
    """ A sequence is near increasing if each element but the last two is smaller than all elements
    following its subsequent element. That is, element i must be smaller than elements i + 2, i + 3, i + 4, etc.
    Implement close, which takes a non-negative integer n and returns the largest near increasing sequence
    of digits within n as an integer. The arguments smallest and d are part of the implementation; you must
    determine their purpose. The only values you may use are integers and booleans (True and False) (no lists, strings, etc.).
    Return the longest sequence of near-increasing digits in n.
    >>> close(123)
    123
    >>> close(153)
    153
    >>> close(1523)
    153
    >>> close(15123)
    1123
    >>> close(11111111)
    11
    >>> close(985357)
    557
    >>> close(14735476)
    143576
    >>> close(812348567)
    1234567
    """
    # Digits are consumed right to left.  `d` is the most recently kept digit
    # (the element immediately after the one we consider next) and `smallest`
    # is the minimum of all kept digits after `d` — i.e. the bound the next
    # kept digit must stay strictly below.
    if n == 0:
        return 0
    # Option 1: skip the current last digit.
    no = close(n // 10, smallest, d)
    if smallest > n % 10:
        # Option 2: keep it.  The new bound for earlier digits is the minimum
        # over everything after the newly kept digit's successor: min(smallest, d).
        yes = close(n // 10, min(smallest, d), n % 10) * 10 + n % 10
        return max(yes, no)
    return no
# ORIGINAL SKELETON FOLLOWS
# def close(n, smallest=10, d=10):
# """ A sequence is near increasing if each element but the last two is smaller than all elements
# following its subsequent element. That is, element i must be smaller than elements i + 2, i + 3, i + 4, etc.
# Implement close, which takes a non-negative integer n and returns the largest near increasing sequence
# of digits within n as an integer. The arguments smallest and d are part of the implementation; you must
# determine their purpose. The only values you may use are integers and booleans (True and False) (no lists, strings, etc.).
# Return the longest sequence of near-increasing digits in n.
# >>> close(123)
# 123
# >>> close(153)
# 153
# >>> close(1523)
# 153
# >>> close(15123)
# 1123
# >>> close(11111111)
# 11
# >>> close(985357)
# 557
# >>> close(14735476)
# 143576
# >>> close(812348567)
# 1234567
# """
# if n == 0:
# return ______
# no = close(n//10, smallest, d)
# if smallest > ______:
# yes = ______
# return ______(yes, no)
# return ______
| 33.358209
| 128
| 0.62953
| 303
| 2,235
| 4.445545
| 0.287129
| 0.062361
| 0.013363
| 0.025241
| 0.982925
| 0.982925
| 0.982925
| 0.982925
| 0.982925
| 0.982925
| 0
| 0.107077
| 0.272931
| 2,235
| 66
| 129
| 33.863636
| 0.721846
| 0.836242
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
52179c0a1afe3db3a62bf4145082487ad254bb25
| 94
|
py
|
Python
|
gailtf/baselines/bench/__init__.py
|
liytt85/gail-tf-pro
|
b5d9e25400b91a60ce9f8aacccaaec4c4af4e453
|
[
"MIT"
] | 201
|
2017-10-17T16:36:05.000Z
|
2022-02-18T11:15:49.000Z
|
gailtf/baselines/bench/__init__.py
|
inverse-reinforement-learning/gail-tf
|
ad92f41c26c34e8fabc536664fb11b44f25956cf
|
[
"MIT"
] | 20
|
2017-10-18T11:43:26.000Z
|
2020-07-09T03:35:14.000Z
|
gailtf/baselines/bench/__init__.py
|
inverse-reinforement-learning/gail-tf
|
ad92f41c26c34e8fabc536664fb11b44f25956cf
|
[
"MIT"
] | 60
|
2017-10-17T19:04:21.000Z
|
2021-05-29T12:39:58.000Z
|
from gailtf.baselines.bench.benchmarks import *
from gailtf.baselines.bench.monitor import *
| 23.5
| 47
| 0.819149
| 12
| 94
| 6.416667
| 0.583333
| 0.25974
| 0.493506
| 0.623377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 94
| 3
| 48
| 31.333333
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
5235806b05b5f8efab89a50b3c3d74a93fb5eda3
| 271
|
py
|
Python
|
django/contrib/flatpages/tests/__init__.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 3
|
2016-07-08T23:49:32.000Z
|
2018-04-15T22:55:01.000Z
|
django/contrib/flatpages/tests/__init__.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | 27
|
2017-02-05T15:57:04.000Z
|
2018-04-15T22:57:26.000Z
|
django/contrib/flatpages/tests/__init__.py
|
egenerat/gae-django
|
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
|
[
"MIT"
] | null | null | null |
from django.contrib.flatpages.tests.csrf import *
from django.contrib.flatpages.tests.forms import *
from django.contrib.flatpages.tests.middleware import *
from django.contrib.flatpages.tests.templatetags import *
from django.contrib.flatpages.tests.views import *
| 45.166667
| 58
| 0.815498
| 35
| 271
| 6.314286
| 0.314286
| 0.226244
| 0.384615
| 0.588235
| 0.809955
| 0.669683
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092251
| 271
| 5
| 59
| 54.2
| 0.898374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
523947668dcafd74695795c06788f23aa721b50e
| 34,123
|
py
|
Python
|
tests/ope/test_ipw_estimators_slate.py
|
han20192019/newRL
|
53598edab284b4364d127ec5662137de3f9c1206
|
[
"Apache-2.0"
] | null | null | null |
tests/ope/test_ipw_estimators_slate.py
|
han20192019/newRL
|
53598edab284b4364d127ec5662137de3f9c1206
|
[
"Apache-2.0"
] | null | null | null |
tests/ope/test_ipw_estimators_slate.py
|
han20192019/newRL
|
53598edab284b4364d127ec5662137de3f9c1206
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
from obp.dataset import linear_behavior_policy_logit
from obp.dataset import logistic_reward_function
from obp.dataset import SyntheticSlateBanditDataset
from obp.ope import SlateIndependentIPS
from obp.ope import SlateRewardInteractionIPS
from obp.ope import SlateStandardIPS
# setting
# Shared fixtures: three slate OPE estimators over slates of length 3.
len_list = 3
sips = SlateStandardIPS(len_list=len_list)
iips = SlateIndependentIPS(len_list=len_list)
rips = SlateRewardInteractionIPS(len_list=len_list)
n_rounds = 5
# --- invalid (all slate estimators) ---
# slate_id, reward, pscore, position, evaluation_policy_pscore, description
# Each tuple is one malformed argument combination; the trailing `#` marks the
# offending field, and `description` is the expected ValueError message prefix.
invalid_input_of_slate_estimators = [
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        "4",  # position is a string, not an ndarray
        np.ones(n_rounds * len_list),
        "position must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds).reshape((n_rounds, len_list)),  # 2D position
        np.ones(n_rounds * len_list),
        "position must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds) - 1,  # contains -1
        np.ones(n_rounds * len_list),
        "position elements must be non-negative integers",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        "4",  # reward is a string
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "reward must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros((n_rounds, len_list), dtype=int),  # 2D reward
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "reward must be 1D array",
    ),
    (
        "4",  # slate_id is a string
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "slate_id must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list).reshape((n_rounds, len_list)),  # 2D slate_id
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "slate_id must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list) - 1,  # contains -1
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "slate_id elements must be non-negative integers",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),  #
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.repeat(np.arange(n_rounds), len_list),  # same position repeated within a slate
        np.ones(n_rounds * len_list),
        "position must not be duplicated in each slate",
    ),
]
@pytest.mark.parametrize(
    "slate_id, reward, pscore, position, evaluation_policy_pscore, description",
    invalid_input_of_slate_estimators,
)
def test_slate_estimators_using_invalid_input_data(
    slate_id, reward, pscore, position, evaluation_policy_pscore, description
) -> None:
    # Every estimator (SIPS, IIPS, RIPS) must reject the malformed input with a
    # ValueError whose message starts with `description`, for both the point
    # estimate and the interval estimate.  The calls after the first raising one
    # are never reached within a single parametrized case.
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = sips.estimate_policy_value(
            slate_id=slate_id,
            reward=reward,
            pscore=pscore,
            position=position,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
        _ = sips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore=pscore,
            position=position,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
        _ = iips.estimate_policy_value(
            slate_id=slate_id,
            reward=reward,
            pscore_item_position=pscore,
            position=position,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore,
        )
        _ = iips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore_item_position=pscore,
            position=position,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore,
        )
        _ = rips.estimate_policy_value(
            slate_id=slate_id,
            reward=reward,
            pscore_cascade=pscore,
            position=position,
            evaluation_policy_pscore_cascade=evaluation_policy_pscore,
        )
        _ = rips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore_cascade=pscore,
            position=position,
            evaluation_policy_pscore_cascade=evaluation_policy_pscore,
        )
# --- valid (all slate estimators) ---
# Well-formed inputs: complete slates, and a truncated final slate (estimators
# must also accept slates with fewer than len_list recorded positions).
valid_input_of_slate_estimators = [
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "each slate has data of 3 (len_list) positions",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list)[:-1],
        np.zeros(n_rounds * len_list, dtype=int)[:-1],
        np.ones(n_rounds * len_list)[:-1],
        np.tile(np.arange(len_list), n_rounds)[:-1],
        np.ones(n_rounds * len_list)[:-1],
        "last slate has data of 2 (len_list - 1) positions",
    ),
]
@pytest.mark.parametrize(
    "slate_id, reward, pscore, position, evaluation_policy_pscore, description",
    valid_input_of_slate_estimators,
)
def test_slate_estimators_using_valid_input_data(
    slate_id, reward, pscore, position, evaluation_policy_pscore, description
) -> None:
    # Smoke test: every estimator must accept well-formed input without raising,
    # for both the point estimate and the interval estimate.
    _ = sips.estimate_policy_value(
        slate_id=slate_id,
        reward=reward,
        pscore=pscore,
        position=position,
        evaluation_policy_pscore=evaluation_policy_pscore,
    )
    _ = sips.estimate_interval(
        slate_id=slate_id,
        reward=reward,
        pscore=pscore,
        position=position,
        evaluation_policy_pscore=evaluation_policy_pscore,
    )
    _ = iips.estimate_policy_value(
        slate_id=slate_id,
        reward=reward,
        pscore_item_position=pscore,
        position=position,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore,
    )
    _ = iips.estimate_interval(
        slate_id=slate_id,
        reward=reward,
        pscore_item_position=pscore,
        position=position,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore,
    )
    _ = rips.estimate_policy_value(
        slate_id=slate_id,
        reward=reward,
        pscore_cascade=pscore,
        position=position,
        evaluation_policy_pscore_cascade=evaluation_policy_pscore,
    )
    _ = rips.estimate_interval(
        slate_id=slate_id,
        reward=reward,
        pscore_cascade=pscore,
        position=position,
        evaluation_policy_pscore_cascade=evaluation_policy_pscore,
    )
# --- invalid (sips) ---
# Malformed pscore / evaluation_policy_pscore inputs specific to the standard
# IPS estimator; the trailing `#` marks the offending field.
invalid_input_of_sips = [
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        "4",  # pscore is a string, not an ndarray
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones((n_rounds, len_list)),  # 2D pscore
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list) + 1,  # probability > 1
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list) - 1,  # probability of 0 (out of the open range)
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list - 1),  # one sample short
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "slate_id, position, reward, pscore, and evaluation_policy_pscore must have the same number of samples",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.hstack([np.ones(n_rounds * len_list - 1), [0.2]]),  # differs within the last slate
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore must be unique in each slate",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        "4",  # evaluation pscore is a string
        "evaluation_policy_pscore must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones((n_rounds, len_list)),  # 2D evaluation pscore
        "evaluation_policy_pscore must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list) + 1,  # probability > 1
        "evaluation_policy_pscore must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list) - 1.1,  # negative probability
        "evaluation_policy_pscore must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.hstack([np.ones(n_rounds * len_list - 1), [0.2]]),  # differs within the last slate
        "evaluation_policy_pscore must be unique in each slate",
    ),
]
@pytest.mark.parametrize(
    "slate_id, reward, pscore, position, evaluation_policy_pscore, description",
    invalid_input_of_sips,
)
def test_sips_using_invalid_input_data(
    slate_id, reward, pscore, position, evaluation_policy_pscore, description
) -> None:
    # SIPS must reject its estimator-specific malformed pscore inputs with a
    # ValueError whose message starts with `description`.
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = sips.estimate_policy_value(
            slate_id=slate_id,
            reward=reward,
            pscore=pscore,
            position=position,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
        _ = sips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore=pscore,
            position=position,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
# --- invalid (iips) ---
# Malformed pscore_item_position inputs specific to the independent IPS
# estimator; the trailing `#` marks the offending field.
invalid_input_of_iips = [
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        "4",  # pscore_item_position is a string, not an ndarray
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_item_position must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones((n_rounds, len_list)),  # 2D pscore_item_position
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_item_position must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list) + 1,  # probability > 1
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_item_position must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list) - 1,  # probability of 0 (out of the open range)
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_item_position must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list - 1),  # one sample short
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "slate_id, position, reward, pscore_item_position, and evaluation_policy_pscore_item_position must have the same number of samples",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        "4",  # evaluation pscore is a string
        "evaluation_policy_pscore_item_position must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones((n_rounds, len_list)),  # 2D evaluation pscore
        "evaluation_policy_pscore_item_position must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list) + 1,  # probability > 1
        "evaluation_policy_pscore_item_position must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list) - 1.1,  # negative probability
        "evaluation_policy_pscore_item_position must be in the range of",
    ),
]
@pytest.mark.parametrize(
    "slate_id, reward, pscore_item_position, position, evaluation_policy_pscore_item_position, description",
    invalid_input_of_iips,
)
def test_iips_using_invalid_input_data(
    slate_id,
    reward,
    pscore_item_position,
    position,
    evaluation_policy_pscore_item_position,
    description,
) -> None:
    # IIPS must reject its estimator-specific malformed pscore inputs with a
    # ValueError whose message starts with `description`.
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = iips.estimate_policy_value(
            slate_id=slate_id,
            reward=reward,
            pscore_item_position=pscore_item_position,
            position=position,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        )
        _ = iips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore_item_position=pscore_item_position,
            position=position,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        )
# --- invalid (rips) ---
# Malformed pscore_cascade inputs specific to the reward-interaction IPS
# estimator; the trailing `#` marks the offending field.  Cascade propensities
# must be non-increasing within a slate (each prefix is at most as likely as
# the shorter prefix before it).
invalid_input_of_rips = [
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        "4",  # pscore_cascade is a string, not an ndarray
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_cascade must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones((n_rounds, len_list)),  # 2D pscore_cascade
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_cascade must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list) + 1,  # probability > 1
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_cascade must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list) - 1,  # probability of 0 (out of the open range)
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_cascade must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list - 1),  # one sample short
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "slate_id, position, reward, pscore_cascade, and evaluation_policy_pscore_cascade must have the same number of samples",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.hstack([[0.2], np.ones(n_rounds * len_list - 1)]),  # increases within the first slate
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list),
        "pscore_cascade must be non-increasing sequence in each slate",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        "4",  # evaluation pscore is a string
        "evaluation_policy_pscore_cascade must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones((n_rounds, len_list)),  # 2D evaluation pscore
        "evaluation_policy_pscore_cascade must be 1D array",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list) + 1,  # probability > 1
        "evaluation_policy_pscore_cascade must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.ones(n_rounds * len_list) - 1.1,  # negative probability
        "evaluation_policy_pscore_cascade must be in the range of",
    ),
    (
        np.repeat(np.arange(n_rounds), len_list),
        np.zeros(n_rounds * len_list, dtype=int),
        np.ones(n_rounds * len_list),
        np.tile(np.arange(len_list), n_rounds),
        np.hstack([[0.2], np.ones(n_rounds * len_list - 1)]),  # increases within the first slate
        "evaluation_policy_pscore_cascade must be non-increasing sequence in each slate",
    ),
]
@pytest.mark.parametrize(
    "slate_id, reward, pscore_cascade, position, evaluation_policy_pscore_cascade, description",
    invalid_input_of_rips,
)
def test_rips_using_invalid_input_data(
    slate_id,
    reward,
    pscore_cascade,
    position,
    evaluation_policy_pscore_cascade,
    description,
) -> None:
    # RIPS must reject its estimator-specific malformed pscore inputs with a
    # ValueError whose message starts with `description`.
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = rips.estimate_policy_value(
            slate_id=slate_id,
            reward=reward,
            pscore_cascade=pscore_cascade,
            position=position,
            evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
        )
        _ = rips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore_cascade=pscore_cascade,
            position=position,
            evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
        )
# --- confidence intervals ---
# alpha, n_bootstrap_samples, random_state, err, description
# Invalid bootstrap-CI hyperparameters (wrong type or out of range) together
# with the exact exception type and expected message prefix.
invalid_input_of_estimate_intervals = [
    (
        0.05,
        100,
        "s",  # random_state not usable as a seed
        ValueError,
        "'s' cannot be used to seed a numpy.random.RandomState instance",
    ),
    (0.05, -1, 1, ValueError, "`n_bootstrap_samples`= -1, must be >= 1"),
    (
        0.05,
        "s",  # wrong type for n_bootstrap_samples
        1,
        TypeError,
        "`n_bootstrap_samples` must be an instance of <class 'int'>, not <class 'str'>",
    ),
    (-1.0, 1, 1, ValueError, "`alpha`= -1.0, must be >= 0.0"),
    (2.0, 1, 1, ValueError, "`alpha`= 2.0, must be <= 1.0"),
    (
        "0",  # wrong type for alpha
        1,
        1,
        TypeError,
        "`alpha` must be an instance of <class 'float'>, not <class 'str'>",
    ),
]
# Well-formed CI hyperparameters used as the happy-path counterpart.
valid_input_of_estimate_intervals = [
    (0.05, 100, 1, "random_state is 1"),
    (0.05, 1, 1, "n_bootstrap_samples is 1"),
]
@pytest.mark.parametrize(
    "slate_id, reward, pscore, position, evaluation_policy_pscore, description_1",
    valid_input_of_slate_estimators,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, err, description_2",
    invalid_input_of_estimate_intervals,
)
def test_estimate_intervals_of_all_estimators_using_invalid_input_data(
    slate_id,
    reward,
    pscore,
    position,
    evaluation_policy_pscore,
    description_1,
    alpha,
    n_bootstrap_samples,
    random_state,
    err,
    description_2,
) -> None:
    # Valid bandit data combined with invalid CI hyperparameters: every
    # estimator's estimate_interval must raise `err` matching `description_2`.
    with pytest.raises(err, match=f"{description_2}*"):
        _ = sips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore=pscore,
            position=position,
            evaluation_policy_pscore=evaluation_policy_pscore,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        _ = iips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore_item_position=pscore,
            position=position,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        _ = rips.estimate_interval(
            slate_id=slate_id,
            reward=reward,
            pscore_cascade=pscore,
            position=position,
            evaluation_policy_pscore_cascade=evaluation_policy_pscore,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@pytest.mark.parametrize(
    "slate_id, reward, pscore, position, evaluation_policy_pscore, description_1",
    valid_input_of_slate_estimators,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, description_2",
    valid_input_of_estimate_intervals,
)
def test_estimate_intervals_of_all_estimators_using_valid_input_data(
    slate_id,
    reward,
    pscore,
    position,
    evaluation_policy_pscore,
    description_1,
    alpha,
    n_bootstrap_samples,
    random_state,
    description_2,
) -> None:
    # Smoke test: valid data plus valid CI hyperparameters must not raise for
    # any of the three estimators.
    _ = sips.estimate_interval(
        slate_id=slate_id,
        reward=reward,
        pscore=pscore,
        position=position,
        evaluation_policy_pscore=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
    _ = iips.estimate_interval(
        slate_id=slate_id,
        reward=reward,
        pscore_item_position=pscore,
        position=position,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
    _ = rips.estimate_interval(
        slate_id=slate_id,
        reward=reward,
        pscore_cascade=pscore,
        position=position,
        evaluation_policy_pscore_cascade=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
def test_slate_ope_performance_using_cascade_additive_log():
    """End-to-end accuracy check: on synthetic cascade-additive logged data,
    each estimator's value for the uniform-random evaluation policy must fall
    within 3 standard errors of the Monte-Carlo ground truth."""
    # set parameters
    n_unique_action = 10
    len_list = 3
    dim_context = 2
    reward_type = "binary"
    random_state = 12345
    n_rounds = 1000
    reward_structure = "cascade_additive"
    click_model = None
    behavior_policy_function = linear_behavior_policy_logit
    reward_function = logistic_reward_function
    # Logging dataset: logit behavior policy generates the logged feedback.
    dataset = SyntheticSlateBanditDataset(
        n_unique_action=n_unique_action,
        len_list=len_list,
        dim_context=dim_context,
        reward_type=reward_type,
        reward_structure=reward_structure,
        click_model=click_model,
        random_state=random_state,
        behavior_policy_function=behavior_policy_function,
        base_reward_function=reward_function,
    )
    # Same environment, but behavior_policy_function=None -> uniform random
    # policy; its feedback provides both the evaluation-policy propensities
    # and the ground-truth policy value.
    random_behavior_dataset = SyntheticSlateBanditDataset(
        n_unique_action=n_unique_action,
        len_list=len_list,
        dim_context=dim_context,
        reward_type=reward_type,
        reward_structure=reward_structure,
        click_model=click_model,
        random_state=random_state,
        behavior_policy_function=None,
        base_reward_function=reward_function,
    )
    # obtain feedback
    bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    slate_id = bandit_feedback["slate_id"]
    reward = bandit_feedback["reward"]
    pscore = bandit_feedback["pscore"]
    pscore_item_position = bandit_feedback["pscore_item_position"]
    pscore_cascade = bandit_feedback["pscore_cascade"]
    position = bandit_feedback["position"]
    # obtain random behavior feedback
    random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(
        n_rounds=n_rounds
    )
    sips_estimated_policy_value = sips.estimate_policy_value(
        slate_id=slate_id,
        reward=reward,
        pscore=pscore,
        position=position,
        evaluation_policy_pscore=random_behavior_feedback["pscore"],
    )
    iips_estimated_policy_value = iips.estimate_policy_value(
        slate_id=slate_id,
        reward=reward,
        pscore_item_position=pscore_item_position,
        position=position,
        evaluation_policy_pscore_item_position=random_behavior_feedback[
            "pscore_item_position"
        ],
    )
    rips_estimated_policy_value = rips.estimate_policy_value(
        slate_id=slate_id,
        reward=reward,
        pscore_cascade=pscore_cascade,
        position=position,
        evaluation_policy_pscore_cascade=random_behavior_feedback["pscore_cascade"],
    )
    # compute statistics of ground truth policy value
    # Per-round slate reward = sum of the item rewards within the slate.
    q_pi_e = (
        random_behavior_feedback["reward"]
        .reshape((n_rounds, dataset.len_list))
        .sum(axis=1)
    )
    gt_mean = q_pi_e.mean()
    gt_std = q_pi_e.std(ddof=1)
    print("Cascade additive")
    # check the performance of OPE
    # 3-sigma tolerance around the Monte-Carlo mean (standard error of the mean).
    ci_bound = gt_std * 3 / np.sqrt(q_pi_e.shape[0])
    print(f"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}")
    estimated_policy_value = {
        "sips": sips_estimated_policy_value,
        "iips": iips_estimated_policy_value,
        "rips": rips_estimated_policy_value,
    }
    for key in estimated_policy_value:
        print(
            f"estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, "
        )
        # test the performance of each estimator
        assert (
            np.abs(gt_mean - estimated_policy_value[key]) <= ci_bound
        ), f"OPE of {key} did not work well (absolute error is greater than 3*sigma)"
def test_slate_ope_performance_using_independent_log():
    """Check that SIPS/IIPS/RIPS estimates fall within 3 sigma of the
    on-policy value of the uniform-random policy, using logged data whose
    reward structure is "independent"."""
    n_rounds = 1000
    # shared configuration for both the behavior- and evaluation-policy datasets
    common_kwargs = dict(
        n_unique_action=10,
        len_list=3,
        dim_context=2,
        reward_type="binary",
        reward_structure="independent",
        click_model=None,
        random_state=12345,
        base_reward_function=logistic_reward_function,
    )
    dataset = SyntheticSlateBanditDataset(
        behavior_policy_function=linear_behavior_policy_logit,
        **common_kwargs,
    )
    # evaluation policy: uniform random behavior (behavior_policy_function=None)
    random_behavior_dataset = SyntheticSlateBanditDataset(
        behavior_policy_function=None,
        **common_kwargs,
    )
    # logged feedback from the behavior policy / on-policy feedback of the random policy
    logged_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(
        n_rounds=n_rounds
    )
    # off-policy estimates of the random policy's value, one per estimator
    estimates = {
        "sips": sips.estimate_policy_value(
            slate_id=logged_feedback["slate_id"],
            reward=logged_feedback["reward"],
            pscore=logged_feedback["pscore"],
            position=logged_feedback["position"],
            evaluation_policy_pscore=random_behavior_feedback["pscore"],
        ),
        "iips": iips.estimate_policy_value(
            slate_id=logged_feedback["slate_id"],
            reward=logged_feedback["reward"],
            pscore_item_position=logged_feedback["pscore_item_position"],
            position=logged_feedback["position"],
            evaluation_policy_pscore_item_position=random_behavior_feedback[
                "pscore_item_position"
            ],
        ),
        "rips": rips.estimate_policy_value(
            slate_id=logged_feedback["slate_id"],
            reward=logged_feedback["reward"],
            pscore_cascade=logged_feedback["pscore_cascade"],
            position=logged_feedback["position"],
            evaluation_policy_pscore_cascade=random_behavior_feedback["pscore_cascade"],
        ),
    }
    # ground-truth statistics: per-round slate reward of the random policy
    q_pi_e = (
        random_behavior_feedback["reward"]
        .reshape((n_rounds, dataset.len_list))
        .sum(axis=1)
    )
    gt_mean = q_pi_e.mean()
    # 3-sigma confidence bound on the Monte-Carlo mean
    ci_bound = 3 * q_pi_e.std(ddof=1) / np.sqrt(q_pi_e.shape[0])
    print("Independent")
    print(f"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}")
    for name, value in estimates.items():
        print(
            f"estimated_value: {value} ------ estimator: {name}, "
        )
        # each estimator must land inside the 3-sigma band around the ground truth
        assert (
            np.abs(gt_mean - value) <= ci_bound
        ), f"OPE of {name} did not work well (absolute error is greater than 3*sigma)"
def test_slate_ope_performance_using_standard_additive_log():
    """Check that SIPS/IIPS/RIPS estimates fall within 3 sigma of the
    on-policy value of the uniform-random policy, using logged data whose
    reward structure is "standard_additive"."""
    n_rounds = 1000
    # shared configuration for both the behavior- and evaluation-policy datasets
    common_kwargs = dict(
        n_unique_action=10,
        len_list=3,
        dim_context=2,
        reward_type="binary",
        reward_structure="standard_additive",
        click_model=None,
        random_state=12345,
        base_reward_function=logistic_reward_function,
    )
    dataset = SyntheticSlateBanditDataset(
        behavior_policy_function=linear_behavior_policy_logit,
        **common_kwargs,
    )
    # evaluation policy: uniform random behavior (behavior_policy_function=None)
    random_behavior_dataset = SyntheticSlateBanditDataset(
        behavior_policy_function=None,
        **common_kwargs,
    )
    # logged feedback from the behavior policy / on-policy feedback of the random policy
    logged_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(
        n_rounds=n_rounds
    )
    # off-policy estimates of the random policy's value, one per estimator
    estimates = {
        "sips": sips.estimate_policy_value(
            slate_id=logged_feedback["slate_id"],
            reward=logged_feedback["reward"],
            pscore=logged_feedback["pscore"],
            position=logged_feedback["position"],
            evaluation_policy_pscore=random_behavior_feedback["pscore"],
        ),
        "iips": iips.estimate_policy_value(
            slate_id=logged_feedback["slate_id"],
            reward=logged_feedback["reward"],
            pscore_item_position=logged_feedback["pscore_item_position"],
            position=logged_feedback["position"],
            evaluation_policy_pscore_item_position=random_behavior_feedback[
                "pscore_item_position"
            ],
        ),
        "rips": rips.estimate_policy_value(
            slate_id=logged_feedback["slate_id"],
            reward=logged_feedback["reward"],
            pscore_cascade=logged_feedback["pscore_cascade"],
            position=logged_feedback["position"],
            evaluation_policy_pscore_cascade=random_behavior_feedback["pscore_cascade"],
        ),
    }
    # ground-truth statistics: per-round slate reward of the random policy
    q_pi_e = (
        random_behavior_feedback["reward"]
        .reshape((n_rounds, dataset.len_list))
        .sum(axis=1)
    )
    gt_mean = q_pi_e.mean()
    # 3-sigma confidence bound on the Monte-Carlo mean
    ci_bound = 3 * q_pi_e.std(ddof=1) / np.sqrt(q_pi_e.shape[0])
    print("Standard additive")
    print(f"gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}")
    for name, value in estimates.items():
        print(
            f"estimated_value: {value} ------ estimator: {name}, "
        )
        # each estimator must land inside the 3-sigma band around the ground truth
        assert (
            np.abs(gt_mean - value) <= ci_bound
        ), f"OPE of {name} did not work well (absolute error is greater than 3*sigma)"
| 34.05489
| 140
| 0.645811
| 4,311
| 34,123
| 4.755973
| 0.040826
| 0.078525
| 0.079501
| 0.111301
| 0.944691
| 0.939082
| 0.932985
| 0.927474
| 0.921865
| 0.912891
| 0
| 0.007257
| 0.252879
| 34,123
| 1,001
| 141
| 34.088911
| 0.796972
| 0.024939
| 0
| 0.791398
| 0
| 0
| 0.124386
| 0.027678
| 0
| 0
| 0
| 0
| 0.003226
| 1
| 0.010753
| false
| 0
| 0.008602
| 0
| 0.019355
| 0.009677
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
526aababcea0ce6cf7980a6052f5bc834a6c14a7
| 19,762
|
py
|
Python
|
tqsdk/test/api/test_td_trade.py
|
Asnebula/tqsdk-python
|
53d5b3d56653021a5896311dfb16a1305a3e2267
|
[
"Apache-2.0"
] | 2
|
2020-01-23T15:08:02.000Z
|
2020-07-30T04:05:30.000Z
|
tqsdk/test/api/test_td_trade.py
|
Asnebula/tqsdk-python
|
53d5b3d56653021a5896311dfb16a1305a3e2267
|
[
"Apache-2.0"
] | 7
|
2019-11-08T05:02:32.000Z
|
2021-01-29T04:01:21.000Z
|
tqsdk/test/api/test_td_trade.py
|
Asnebula/tqsdk-python
|
53d5b3d56653021a5896311dfb16a1305a3e2267
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
@author: yanqiong
@file: test_td_trade.py
@create_on: 2020/6/12
@description:
"""
import os
import random
import unittest
from tqsdk import TqApi, TqAccount, utils
from tqsdk.test.api.helper import MockInsServer, MockServer
class TestTdTrade(unittest.TestCase):
"""
Tests of insert_order in various situations against a live-broker account
(replayed through mock servers from recorded scripts).
"""
# Start the mock instrument server and the mock md/td server before each test.
def setUp(self):
self.ins = MockInsServer(5000)
self.mock = MockServer(td_url_character="q7.htfutures.com")
self.ins_url_2020_06_16 = "http://127.0.0.1:5000/t/md/symbols/2020-06-16.json"
self.md_url = "ws://127.0.0.1:5100/"
self.td_url = "ws://127.0.0.1:5200/"
# Shut both mock servers down after each test.
def tearDown(self):
self.ins.close()
self.mock.close()
# SHFE: inserting an order without a limit price is expected to raise.
def test_insert_order_shfe_anyprice(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_shfe_anyprice.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
# run the test
with self.assertRaises(Exception):
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("SHFE.au2012", "BUY", "OPEN", 1)
# SHFE limit order with FOK: not filled, fully cancelled (volume_left == volume_orign).
def test_insert_order_shfe_limit_fok(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_shfe_limit_fok.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url, debug=False) as api:
order1 = api.insert_order("SHFE.rb2010", "BUY", "OPEN", 2, limit_price=3500, advanced="FOK", order_id="PYSDK_insert_SHFE_limit_FOK")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_SHFE_limit_FOK", order1.order_id)
self.assertEqual(" 25169789", order1.exchange_order_id)
self.assertEqual("SHFE", order1.exchange_id)
self.assertEqual("rb2010", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(2, order1.volume_orign)
self.assertEqual(2, order1.volume_left)
self.assertEqual(3500.0, order1.limit_price)
self.assertEqual(1593585599000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("LIMIT", order1.price_type)
self.assertEqual("ALL", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("已撤单报单已提交", order1.last_msg)
self.assertEqual("{'order_id': 'PYSDK_insert_SHFE_limit_FOK', 'exchange_order_id': ' 25169789', 'exchange_id': 'SHFE', 'instrument_id': 'rb2010', 'direction': 'BUY', 'offset': 'OPEN', 'volume_orign': 2, 'volume_left': 2, 'limit_price': 3500.0, 'price_type': 'LIMIT', 'volume_condition': 'ALL', 'time_condition': 'IOC', 'insert_date_time': 1593585599000000000, 'last_msg': '已撤单报单已提交', 'status': 'FINISHED', 'seqno': 19, 'user_id': '83011119'}",
str(order1))
# SHFE limit order with FAK: not filled, fully cancelled.
def test_insert_order_shfe_limit_fak(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_shfe_limit_fak.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url, debug=False) as api:
order1 = api.insert_order("SHFE.rb2010", "BUY", "OPEN", 2, limit_price=3500, advanced="FAK", order_id="PYSDK_insert_SHFE_limit_FAK")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_SHFE_limit_FAK", order1.order_id)
self.assertEqual(" 25308102", order1.exchange_order_id)
self.assertEqual("SHFE", order1.exchange_id)
self.assertEqual("rb2010", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(2, order1.volume_orign)
self.assertEqual(2, order1.volume_left)
self.assertEqual(3500.0, order1.limit_price)
self.assertEqual(1593585743000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("LIMIT", order1.price_type)
self.assertEqual("ANY", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("已撤单报单已提交", order1.last_msg)
self.assertEqual("{'order_id': 'PYSDK_insert_SHFE_limit_FAK', 'exchange_order_id': ' 25308102', 'exchange_id': 'SHFE', 'instrument_id': 'rb2010', 'direction': 'BUY', 'offset': 'OPEN', 'volume_orign': 2, 'volume_left': 2, 'limit_price': 3500.0, 'price_type': 'LIMIT', 'volume_condition': 'ANY', 'time_condition': 'IOC', 'insert_date_time': 1593585743000000000, 'last_msg': '已撤单报单已提交', 'status': 'FINISHED', 'seqno': 21, 'user_id': '83011119'}",
str(order1))
# DCE: limit_price="BEST" is expected to raise.
def test_insert_order_dec_best(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dec_best.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
# run the test
with self.assertRaises(Exception):
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "OPEN", 1, limit_price="BEST", order_id="PYSDK_insert_DCE_BEST")
# DCE: limit_price="FIVELEVEL" is expected to raise.
def test_insert_order_dec_fivelevel(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dec_fivelevel.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
# run the test
with self.assertRaises(Exception):
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url,
_td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "OPEN", 1, limit_price="FIVELEVEL",
order_id="PYSDK_insert_DCE_FIVELEVEL")
# DCE market ("ANY" price) order: fully filled (volume_left == 0).
def test_insert_order_dce_anyprice(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dce_anyprice.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "OPEN", 1, order_id="PYSDK_insert_DCE_any")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_DCE_any", order1.order_id)
self.assertEqual(" 15350014", order1.exchange_order_id)
self.assertEqual("DCE", order1.exchange_id)
self.assertEqual("m2009", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(1, order1.volume_orign)
self.assertEqual(0, order1.volume_left)
self.assertEqual(0.0, order1.limit_price)
self.assertEqual(1593586583000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("ANY", order1.price_type)
self.assertEqual("ANY", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("全部成交", order1.last_msg)
self.assertEqual(
"{'order_id': 'PYSDK_insert_DCE_any', 'exchange_order_id': ' 15350014', 'exchange_id': 'DCE', 'instrument_id': 'm2009', 'direction': 'BUY', 'offset': 'OPEN', 'volume_orign': 1, 'volume_left': 0, 'limit_price': 0.0, 'price_type': 'ANY', 'volume_condition': 'ANY', 'time_condition': 'IOC', 'insert_date_time': 1593586583000000000, 'last_msg': '全部成交', 'status': 'FINISHED', 'seqno': 38, 'user_id': '83011119'}",
str(order1))
# DCE market order with FOK on CLOSE: fully filled.
def test_insert_order_dce_anyprice_fok(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dce_anyprice_fok.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "CLOSE", 2, advanced="FOK", order_id="PYSDK_insert_DCE_any_FOK")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_DCE_any_FOK", order1.order_id)
self.assertEqual(" 13681949", order1.exchange_order_id)
self.assertEqual("DCE", order1.exchange_id)
self.assertEqual("m2009", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("CLOSE", order1.offset)
self.assertEqual(2, order1.volume_orign)
self.assertEqual(0, order1.volume_left)
self.assertEqual(0.0, order1.limit_price)
self.assertEqual(1593657995000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("ANY", order1.price_type)
self.assertEqual("ALL", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("全部成交", order1.last_msg)
self.assertEqual(
"{'order_id': 'PYSDK_insert_DCE_any_FOK', 'exchange_order_id': ' 13681949', 'exchange_id': 'DCE', 'instrument_id': 'm2009', 'direction': 'BUY', 'offset': 'CLOSE', 'volume_orign': 2, 'volume_left': 0, 'limit_price': 0.0, 'price_type': 'ANY', 'volume_condition': 'ALL', 'time_condition': 'IOC', 'insert_date_time': 1593657995000000000, 'last_msg': '全部成交', 'status': 'FINISHED', 'seqno': 6, 'user_id': '83011119'}",
str(order1))
# DCE limit order with FAK, price away from market: not filled, cancelled.
def test_insert_order_dce_limit_fak(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dce_limit_fak.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "OPEN", 2, limit_price=2800, advanced="FAK", order_id="PYSDK_insert_DCE_limit_FAK")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_DCE_limit_FAK", order1.order_id)
self.assertEqual(" 15189608", order1.exchange_order_id)
self.assertEqual("DCE", order1.exchange_id)
self.assertEqual("m2009", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(2, order1.volume_orign)
self.assertEqual(2, order1.volume_left)
self.assertEqual(2800.0, order1.limit_price)
self.assertEqual(1593585989000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("LIMIT", order1.price_type)
self.assertEqual("ANY", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("已撤单", order1.last_msg)
self.assertEqual(
"{'order_id': 'PYSDK_insert_DCE_limit_FAK', 'exchange_order_id': ' 15189608', 'exchange_id': 'DCE', 'instrument_id': 'm2009', 'direction': 'BUY', 'offset': 'OPEN', 'volume_orign': 2, 'volume_left': 2, 'limit_price': 2800.0, 'price_type': 'LIMIT', 'volume_condition': 'ANY', 'time_condition': 'IOC', 'insert_date_time': 1593585989000000000, 'last_msg': '已撤单', 'status': 'FINISHED', 'seqno': 24, 'user_id': '83011119'}",
str(order1))
# DCE limit order with FOK, price away from market: not filled, cancelled.
def test_insert_order_dce_limit_fok(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dce_limit_fok.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "OPEN", 2, limit_price=2800, advanced="FOK", order_id="PYSDK_insert_DCE_limit_FOK")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_DCE_limit_FOK", order1.order_id)
self.assertEqual(" 15236982", order1.exchange_order_id)
self.assertEqual("DCE", order1.exchange_id)
self.assertEqual("m2009", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(2, order1.volume_orign)
self.assertEqual(2, order1.volume_left)
self.assertEqual(2800.0, order1.limit_price)
self.assertEqual(1593586120000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("LIMIT", order1.price_type)
self.assertEqual("ALL", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("已撤单", order1.last_msg)
self.assertEqual(
"{'order_id': 'PYSDK_insert_DCE_limit_FOK', 'exchange_order_id': ' 15236982', 'exchange_id': 'DCE', 'instrument_id': 'm2009', 'direction': 'BUY', 'offset': 'OPEN', 'volume_orign': 2, 'volume_left': 2, 'limit_price': 2800.0, 'price_type': 'LIMIT', 'volume_condition': 'ALL', 'time_condition': 'IOC', 'insert_date_time': 1593586120000000000, 'last_msg': '已撤单', 'status': 'FINISHED', 'seqno': 27, 'user_id': '83011119'}",
str(order1))
# DCE marketable limit order with FAK: fully filled.
def test_insert_order_dce_limit_fak1(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dce_limit_fak1.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "BUY", "OPEN", 1, limit_price=2890, advanced="FAK", order_id="PYSDK_insert_DCE_limit_FAK1")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_DCE_limit_FAK1", order1.order_id)
self.assertEqual(" 15266799", order1.exchange_order_id)
self.assertEqual("DCE", order1.exchange_id)
self.assertEqual("m2009", order1.instrument_id)
self.assertEqual("BUY", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(1, order1.volume_orign)
self.assertEqual(0, order1.volume_left)
self.assertEqual(2890.0, order1.limit_price)
self.assertEqual(1593586261000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("LIMIT", order1.price_type)
self.assertEqual("ANY", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("全部成交", order1.last_msg)
self.assertEqual(
"{'order_id': 'PYSDK_insert_DCE_limit_FAK1', 'exchange_order_id': ' 15266799', 'exchange_id': 'DCE', 'instrument_id': 'm2009', 'direction': 'BUY', 'offset': 'OPEN', 'volume_orign': 1, 'volume_left': 0, 'limit_price': 2890.0, 'price_type': 'LIMIT', 'volume_condition': 'ANY', 'time_condition': 'IOC', 'insert_date_time': 1593586261000000000, 'last_msg': '全部成交', 'status': 'FINISHED', 'seqno': 30, 'user_id': '83011119'}",
str(order1))
# DCE marketable SELL limit order with FOK: fully filled.
def test_insert_order_dce_limit_fok1(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_insert_order_dce_limit_fok1.script"))
# run the test
account = TqAccount("H海通期货", "83011119", "********")
utils.RD = random.Random(4)  # fixed seed so generated order ids match the recorded script
with TqApi(account=account, _ins_url=self.ins_url_2020_06_16, _md_url=self.md_url, _td_url=self.td_url,
debug=False) as api:
order1 = api.insert_order("DCE.m2009", "SELL", "OPEN", 2, limit_price=2905, advanced="FOK", order_id="PYSDK_insert_DCE_limit_FOK1")
while True:
api.wait_update()
if order1.status == "FINISHED":
break
self.assertEqual("PYSDK_insert_DCE_limit_FOK1", order1.order_id)
self.assertEqual(" 13619123", order1.exchange_order_id)
self.assertEqual("DCE", order1.exchange_id)
self.assertEqual("m2009", order1.instrument_id)
self.assertEqual("SELL", order1.direction)
self.assertEqual("OPEN", order1.offset)
self.assertEqual(2, order1.volume_orign)
self.assertEqual(0, order1.volume_left)
self.assertEqual(2905.0, order1.limit_price)
self.assertEqual(1593657671000000000, order1.insert_date_time)
self.assertEqual("FINISHED", order1.status)
self.assertEqual("LIMIT", order1.price_type)
self.assertEqual("ALL", order1.volume_condition)
self.assertEqual("IOC", order1.time_condition)
self.assertEqual("全部成交", order1.last_msg)
self.assertEqual(
"{'order_id': 'PYSDK_insert_DCE_limit_FOK1', 'exchange_order_id': ' 13619123', 'exchange_id': 'DCE', 'instrument_id': 'm2009', 'direction': 'SELL', 'offset': 'OPEN', 'volume_orign': 2, 'volume_left': 0, 'limit_price': 2905.0, 'price_type': 'LIMIT', 'volume_condition': 'ALL', 'time_condition': 'IOC', 'insert_date_time': 1593657671000000000, 'last_msg': '全部成交', 'status': 'FINISHED', 'seqno': 2, 'user_id': '83011119'}",
str(order1))
| 61.372671
| 458
| 0.63025
| 2,385
| 19,762
| 4.932495
| 0.069602
| 0.16321
| 0.046243
| 0.027542
| 0.911
| 0.886433
| 0.845461
| 0.821404
| 0.807208
| 0.790547
| 0
| 0.075894
| 0.22857
| 19,762
| 321
| 459
| 61.563863
| 0.695769
| 0.009463
| 0
| 0.689286
| 0
| 0.032143
| 0.273174
| 0.054358
| 0
| 0
| 0
| 0
| 0.467857
| 1
| 0.046429
| false
| 0
| 0.017857
| 0
| 0.067857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5275a0a18805f0a1c157c55d1a7a90be9247fdb6
| 275
|
py
|
Python
|
nmigen/vendor/lattice_machxo_2_3l.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 528
|
2020-01-28T18:21:00.000Z
|
2021-12-09T06:27:51.000Z
|
nmigen/vendor/lattice_machxo_2_3l.py
|
DX-MON/nmigen
|
a6a13dd612ee1c9215719c70a5aa410a8775ffdb
|
[
"BSD-2-Clause"
] | 360
|
2020-01-28T18:34:30.000Z
|
2021-12-10T08:03:32.000Z
|
nmigen/vendor/lattice_machxo_2_3l.py
|
DX-MON/nmigen
|
a6a13dd612ee1c9215719c70a5aa410a8775ffdb
|
[
"BSD-2-Clause"
] | 100
|
2020-02-06T21:55:46.000Z
|
2021-11-25T19:20:44.000Z
|
# Backwards-compatibility shim: the project was renamed nmigen -> amaranth.
# Re-export the amaranth module's public names (including its __all__, which a
# star-import does not copy) and warn callers to migrate to the new import path.
from amaranth.vendor.lattice_machxo_2_3l import *
from amaranth.vendor.lattice_machxo_2_3l import __all__
import warnings
# stacklevel=2 points the warning at the importer, not at this shim module
warnings.warn("instead of nmigen.vendor.lattice_machxo_2_3l, use amaranth.vendor.lattice_machxo_2_3l",
DeprecationWarning, stacklevel=2)
| 34.375
| 102
| 0.814545
| 39
| 275
| 5.333333
| 0.435897
| 0.25
| 0.365385
| 0.384615
| 0.634615
| 0.528846
| 0.384615
| 0.384615
| 0
| 0
| 0
| 0.03719
| 0.12
| 275
| 7
| 103
| 39.285714
| 0.822314
| 0
| 0
| 0
| 0
| 0
| 0.309091
| 0.250909
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5286d3be853c16499865fc67466ef1e2d49c741b
| 18,115
|
py
|
Python
|
dexguru_sdk/sdk/dg_sdk.py
|
sprataa/dg-sdk-python
|
4cbc231f067167ecae21d74db6b7011645f68a13
|
[
"MIT"
] | 11
|
2021-09-15T14:29:13.000Z
|
2022-03-23T01:38:10.000Z
|
dexguru_sdk/sdk/dg_sdk.py
|
sprataa/dg-sdk-python
|
4cbc231f067167ecae21d74db6b7011645f68a13
|
[
"MIT"
] | null | null | null |
dexguru_sdk/sdk/dg_sdk.py
|
sprataa/dg-sdk-python
|
4cbc231f067167ecae21d74db6b7011645f68a13
|
[
"MIT"
] | 6
|
2021-09-26T02:50:10.000Z
|
2022-02-01T14:13:18.000Z
|
import urllib.parse
from typing import List, Optional, Union
from pydantic import HttpUrl, conint
from dexguru_sdk import models
from dexguru_sdk.client.aiohttp_client import HTTPClient
from dexguru_sdk.models.choices import *
from dexguru_sdk.utils.get_query import get_query_from_params
# Lower bound accepted for begin/end timestamp query parameters
# (used as the `ge=` constraint in the conint annotations below).
START_BLOCK_TIMESTAMP = 1588723228
# Default API endpoint; API_VERSION is urljoin'ed onto it in DexGuru.__init__.
DEFAULT_DOMAIN = 'https://api.dev.dex.guru'
API_VERSION = 'v1/'
class DexGuru:
"""Main class for getting data.
For initialization, pass the api key of your project.
If you have especial domain address, put it into 'domain' arg.
Read more about methods and args on https://docs.dex.guru/api.
Args:
api_key (str): API key of dev.dex.guru project.
domain (str, optional): Especial API domain address.
"""
def __init__(self, api_key: str, domain: Optional[HttpUrl] = DEFAULT_DOMAIN):
# Append the API version path to the domain and build an HTTP client that
# sends the key on every request via the 'api-key' header.
domain = urllib.parse.urljoin(domain, API_VERSION)
self._client = HTTPClient(headers={'api-key': api_key}, domain=domain)
self._chain_prefix = 'chain'  # common URL prefix for all endpoints
# GET /chain — list all chains supported by the API.
async def get_chains(self) -> models.ChainsListModel:
response: dict = await self._client.get(f'{self._chain_prefix}')
return models.ChainsListModel.parse_obj(response)
# GET /chain/{chain_id} — details of a single chain.
async def get_chain(self, chain_id: int) -> models.ChainModel:
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}')
return models.ChainModel.parse_obj(response)
# GET /chain/{chain_id}/transactions — all transaction types, filtered/paged
# by the keyword arguments.
async def get_transactions(
self,
chain_id: int,
amm: AmmChoices = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
wallet_category: CategoriesChoices = None,
) -> models.SwapsBurnsMintsListModel:
# locals() captures every parameter here; adding a local before this call
# would change the emitted query string.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/transactions?{query}')
return models.SwapsBurnsMintsListModel.parse_obj(response)
# GET /chain/{chain_id}/transactions/swaps/ — swap transactions only.
async def get_txs_swaps(
self,
chain_id: int,
amm: AmmChoices = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
wallet_category: CategoriesChoices = None,
) -> models.SwapsBurnsMintsListModel:
# locals() folds every parameter into the query string; do not add locals above.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/transactions/swaps/?{query}')
return models.SwapsBurnsMintsListModel.parse_obj(response)
# GET /chain/{chain_id}/transactions/burns/ — burn transactions only.
async def get_txs_burns(
self,
chain_id: int,
amm: AmmChoices = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
# locals() folds every parameter into the query string; do not add locals above.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/transactions/burns/?{query}')
return models.SwapsBurnsMintsListModel.parse_obj(response)
# GET /chain/{chain_id}/transactions/mints/ — mint transactions only.
async def get_txs_mints(
self,
chain_id: int,
amm: AmmChoices = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
# locals() folds every parameter into the query string; do not add locals above.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/transactions/mints/?{query}')
return models.SwapsBurnsMintsListModel.parse_obj(response)
# GET /chain/{chain_id}/tokens/ — search tokens by name or symbol substring.
async def search_tokens_by_name_or_symbol(
self,
chain_id: int,
search_string: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
verified: bool = True,  # defaults to verified tokens only
) -> models.TokensInventoryListModel:
# locals() folds every parameter into the query string; do not add locals above.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/?{query}')
return models.TokensInventoryListModel.parse_obj(response)
# GET /chain/{chain_id}/tokens/market/ — market/finance data for tokens.
async def get_tokens_finance(
self,
chain_id: int,
token_addresses: List[str] = None,
verified: bool = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
) -> models.TokensFinanceListModel:
# The API expects a comma-separated address list; rebinding the parameter
# (not adding a new local) keeps the locals()-built query consistent.
if token_addresses:
token_addresses = ','.join(token_addresses)
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/market/?{query}')
return models.TokensFinanceListModel.parse_obj(response)
# GET /chain/{chain_id}/tokens/{token_address}/ — inventory data for one token.
async def get_token_inventory_by_address(
self,
chain_id: int,
token_address: str,
) -> models.TokenInventoryModel:
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/?{query}')
return models.TokenInventoryModel.parse_obj(response)
# GET /chain/{chain_id}/tokens/{token_address}/market/ — market data for one token.
async def get_token_finance(
self,
chain_id: int,
token_address: str,
) -> models.TokenFinanceModel:
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/market/?{query}')
return models.TokenFinanceModel.parse_obj(response)
# GET /chain/{chain_id}/tokens/{token_address}/transactions/ — all transactions
# involving one token.
async def get_token_transactions(
self,
chain_id: int,
token_address: str,
amm: AmmChoices = None,
wallet_category: CategoriesChoices = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
# locals() folds every parameter into the query string; do not add locals above.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/transactions/?{query}')
return models.SwapsBurnsMintsListModel.parse_obj(response)
# GET /chain/{chain_id}/tokens/{token_address}/transactions/swaps/ — swaps of one token.
async def get_token_swaps(
self,
chain_id: int,
token_address: str,
amm: AmmChoices = None,
wallet_category: CategoriesChoices = None,
sort_by: str = None,
limit: conint(gt=0, le=100) = 10,
offset: conint(ge=0) = 0,
begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
# locals() folds every parameter into the query string; do not add locals above.
query = get_query_from_params(**locals())
response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/transactions/swaps/?{query}')
return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_token_burns(
    self,
    chain_id: int,
    token_address: str,
    amm: AmmChoices = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch burn transactions only for a token (no wallet_category filter,
    unlike get_token_swaps)."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/transactions/burns/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_token_mints(
    self,
    chain_id: int,
    token_address: str,
    amm: AmmChoices = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch mint transactions only for a token (mirrors get_token_burns)."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/transactions/mints/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_token_market_history(
    self,
    chain_id: int,
    token_address: str,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.TokensHistoryListModel:
    """Fetch historical market data for a token within a timestamp window."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/tokens/{token_address}/market/history/?{query}')
    return models.TokensHistoryListModel.parse_obj(response)
async def get_wallets_info(
    self,
    chain_id: int,
    wallet_addresses: List[str]
) -> models.WalletsListModel:
    """Fetch info for several wallets in a single request."""
    # The API expects a comma-separated list; rebinding the parameter (rather
    # than introducing a new name) keeps locals() -- consumed below -- limited
    # to exactly the intended query parameters.
    wallet_addresses = ','.join(wallet_addresses)
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/wallets/?{query}')
    return models.WalletsListModel.parse_obj(response)
async def get_wallet_info(
    self,
    chain_id: int,
    wallet_address: str
) -> models.WalletModel:
    """Fetch profile information for a single wallet address."""
    endpoint = f'{self._chain_prefix}/{chain_id}/wallets/{wallet_address}'
    payload: dict = await self._client.get(endpoint)
    return models.WalletModel.parse_obj(payload)
async def get_wallet_transactions(
    self,
    chain_id: int,
    wallet_address: str,
    amm: AmmChoices = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch combined swap/burn/mint transactions for a wallet, optionally
    filtered by AMM and time window, paginated via limit/offset."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/wallets/{wallet_address}/transactions/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_wallet_swaps(
    self,
    chain_id: int,
    wallet_address: str,
    amm: AmmChoices = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch swap transactions only for a wallet (same filters/pagination as
    get_wallet_transactions)."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/wallets/{wallet_address}/transactions/swaps/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_wallet_burns(
    self,
    chain_id: int,
    wallet_address: str,
    amm: AmmChoices = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch burn transactions only for a wallet."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/wallets/{wallet_address}/transactions/burns/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_wallet_mints(
    self,
    chain_id: int,
    wallet_address: str,
    amm: AmmChoices = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch mint transactions only for a wallet."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/wallets/{wallet_address}/transactions/mints/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_amms_swaps(
    self,
    chain_id: int,
    # Annotation widened to match sibling get_amms_burns/get_amms_mints; the
    # body already accepted a pre-joined str.
    amms: Union[List[str], str] = None,
    token_address: Optional[str] = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
    wallet_category: CategoriesChoices = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch swap transactions across one or more AMMs on a chain."""
    # Normalise a list of AMM names to the comma-separated form the API expects.
    if isinstance(amms, list):
        amms = ','.join(amms)
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/amms/swaps/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_amms_burns(
    self,
    chain_id: int,
    amms: Union[List[str], str] = None,
    token_address: Optional[str] = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch burn transactions across one or more AMMs on a chain."""
    # Normalise a list of AMM names to the comma-separated form the API expects.
    if isinstance(amms, list):
        amms = ','.join(amms)
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/amms/burns/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_amms_mints(
    self,
    chain_id: int,
    amms: Union[List[str], str] = None,
    token_address: Optional[str] = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch mint transactions across one or more AMMs on a chain."""
    # Normalise a list of AMM names to the comma-separated form the API expects.
    if isinstance(amms, list):
        amms = ','.join(amms)
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/amms/mints/?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_amm_swaps(
    self,
    chain_id: int,
    amm: AmmChoices,
    token_address: Optional[str] = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
    wallet_category: CategoriesChoices = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch swap transactions for a single AMM on a chain."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    # NOTE(review): this endpoint has no trailing slash before '?', unlike the
    # /amms/swaps/? endpoints above -- presumably matches the server routes; confirm.
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/amms/{amm}/swaps?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_amm_burns(
    self,
    chain_id: int,
    amm: AmmChoices,
    token_address: Optional[str] = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch burn transactions for a single AMM on a chain."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/amms/{amm}/burns?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_amm_mints(
    self,
    chain_id: int,
    amm: AmmChoices,
    token_address: Optional[str] = None,
    sort_by: str = None,
    limit: conint(gt=0, le=100) = 10,
    offset: conint(ge=0) = 0,
    begin_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = START_BLOCK_TIMESTAMP,
    end_timestamp: conint(ge=START_BLOCK_TIMESTAMP) = None,
) -> models.SwapsBurnsMintsListModel:
    """Fetch mint transactions for a single AMM on a chain."""
    # NOTE: **locals() captures every parameter (and self); add no locals above this line.
    query = get_query_from_params(**locals())
    response: dict = await self._client.get(f'{self._chain_prefix}/{chain_id}/amms/{amm}/mints?{query}')
    return models.SwapsBurnsMintsListModel.parse_obj(response)
async def get_all_amm_inventory(self, chain_id: int) -> models.AmmListModel:
    """Fetch the inventory of every AMM available on the given chain."""
    endpoint = f'{self._chain_prefix}/{chain_id}/amms'
    payload: dict = await self._client.get(endpoint)
    return models.AmmListModel.parse_obj(payload)
async def get_amm_inventory(self, chain_id: int, amm: AmmChoices) -> models.AmmModel:
    """Fetch inventory details for one AMM on the given chain."""
    endpoint = f'{self._chain_prefix}/{chain_id}/amms/{amm}'
    payload: dict = await self._client.get(endpoint)
    return models.AmmModel.parse_obj(payload)
| 45.174564
| 136
| 0.634778
| 2,098
| 18,115
| 5.220686
| 0.066254
| 0.052954
| 0.100612
| 0.076326
| 0.857025
| 0.845613
| 0.824523
| 0.798868
| 0.792477
| 0.792477
| 0
| 0.012674
| 0.255203
| 18,115
| 400
| 137
| 45.2875
| 0.79914
| 0.017996
| 0
| 0.714689
| 0
| 0
| 0.100879
| 0.097274
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002825
| false
| 0
| 0.019774
| 0
| 0.107345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8706f77b41da9a93a1d75abe2bf5204d7116717b
| 14,531
|
py
|
Python
|
mystery/tests/match_tests.py
|
anselmbradford/collab-mystery-meet
|
cda20bd1888edf8666f290c87817e63d8921f3bd
|
[
"CC0-1.0"
] | 2
|
2015-07-11T17:52:13.000Z
|
2016-08-15T04:04:03.000Z
|
mystery/tests/match_tests.py
|
anselmbradford/collab-mystery-meet
|
cda20bd1888edf8666f290c87817e63d8921f3bd
|
[
"CC0-1.0"
] | null | null | null |
mystery/tests/match_tests.py
|
anselmbradford/collab-mystery-meet
|
cda20bd1888edf8666f290c87817e63d8921f3bd
|
[
"CC0-1.0"
] | 3
|
2017-07-14T03:20:05.000Z
|
2021-02-20T10:40:57.000Z
|
from django.test import TestCase
from mystery.models import Interest
from mystery.tests.utils import mock_req, random_user
from mystery import views
from mock import patch
from django.contrib.auth import get_user_model
from core.models import OrgGroup, OfficeLocation
from django.core.urlresolvers import reverse
class MatchTest(TestCase):
    """Integration tests for the mystery-meet matching flow: submitting an
    Interest, matching two compatible registrations, and cancelling or
    completing a match."""

    fixtures = ['core-test-fixtures', ]

    def test_pending_match_default_page(self):
        """ Verify pending match is default page after submission """
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        self.client.post(reverse('mystery:mystery'), {'meet_choice':Interest.CHOICE_COFFEE,
                                                      'departments':[org.pk],
                                                      'locations':[office.pk]})
        self.assertEqual(Interest.objects.count(), 1)
        resp = self.client.get(reverse('mystery:mystery'))
        # "Cancel this" is only rendered on the pending-match page
        self.assertContains(resp, "Cancel this", status_code=200)

    def test_pending_must_be_logged_in(self):
        """ A user must be logged in to view pending match page """
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        self.client.post(reverse('mystery:mystery'), {'meet_choice':Interest.CHOICE_COFFEE,
                                                      'departments':[org.pk],
                                                      'locations':[office.pk]})
        self.client.logout()
        resp = self.client.get(reverse('mystery:mystery'))
        # anonymous users are redirected to the login page
        self.assertEqual(resp.status_code, 302)
        self.assertIn('login', resp['Location'])

    def test_cancel_pending_match(self):
        """ Test cancellation before match is complete """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission = Interest()
        submission.owner = user1
        submission.for_coffee = True
        submission.save()
        submission.locations.add(office)
        submission.departments.add(org)
        self.assertEqual(submission.is_active, True)
        resp = self.client.get(reverse('mystery:close_cancel', args=(submission.id,)))
        self.assertEqual(resp.status_code, 302)
        self.assertIn(reverse('mystery:mystery'), resp['Location'])
        # cancelling deactivates the submission
        self.assertEqual(Interest.objects.get(id=submission.id).is_active, False)

    def test_assigned_match(self):
        """ Test a valid match results in assigned match page """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Cancel this", status_code=200)
        # second registration: created inactive, M2M relations added, then
        # activated -- presumably so matching only runs on the final save; confirm
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        submission2.for_coffee = True
        submission2.locations.add(office)
        submission2.departments.add(org)
        submission2.is_active = True
        submission2.save()
        resp = self.client.get(reverse('mystery:mystery'))
        # a matched page offers both incomplete- and complete-close actions
        self.assertContains(resp, reverse("mystery:close_incomplete", args=(submission1.id,)))
        self.assertContains(resp, reverse("mystery:close_complete", args=(submission1.id,)))
        # verify assigned match page requires login
        self.client.logout()
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertEqual(resp.status_code, 302)
        self.assertIn('login', resp['Location'])

    def test_assigned_video_match(self):
        """ Test a valid video match results in assigned match page """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.video_chat = True
        submission1.save()
        # no locations added: video-chat submissions omit the location filter here
        submission1.departments.add(org)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Cancel this", status_code=200)
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        submission2.video_chat = True
        submission2.departments.add(org)
        submission2.is_active = True
        submission2.save()
        self.assertEqual(submission2.is_active, True)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Success", status_code=200)

    def test_cancel_assigned_match(self):
        """ Test cancellation of assigned match """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        submission2.for_coffee = True
        submission2.locations.add(office)
        submission2.departments.add(org)
        submission2.is_active = True
        submission2.save()
        resp = self.client.get(reverse('mystery:close_incomplete', args=(submission1.id,)))
        self.assertEqual(resp.status_code, 302)
        self.assertIn('forms', resp['Location'])
        # only the closer's submission is deactivated; the partner stays active
        self.assertEqual(Interest.objects.get(id=submission1.id).is_active, False)
        self.assertEqual(Interest.objects.get(id=submission2.id).is_active, True)

    def test_complete_assigned_match(self):
        """ Test closure of assigned match """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        submission2.for_coffee = True
        submission2.locations.add(office)
        submission2.departments.add(org)
        submission2.is_active = True
        submission2.save()
        resp = self.client.get(reverse('mystery:close_complete', args=(submission1.id,)))
        self.assertEqual(resp.status_code, 302)
        self.assertIn('forms', resp['Location'])
        # completing closes only the closer's submission
        self.assertEqual(Interest.objects.get(id=submission1.id).is_active, False)
        self.assertEqual(Interest.objects.get(id=submission2.id).is_active, True)

    def test_non_matching_type(self):
        """ Verify registrations with different meet type (lunch, etc) do not register as a match. """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Cancel this", status_code=200)
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        # lunch vs. coffee: incompatible meet types must not match
        submission2.for_lunch = True
        submission2.locations.add(office)
        submission2.departments.add(org)
        submission2.is_active = True
        submission2.save()
        resp = self.client.get(reverse('mystery:mystery'))
        # still on the pending page -- no match was made
        self.assertContains(resp, "Cancel this", status_code=200)

    def test_non_matching_org(self):
        """ Verify registrations with different org do not register as a match. """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Cancel this", status_code=200)
        user2 = random_user()
        # a distinct child org so the two submissions share no department
        org2 = OrgGroup()
        org2.parent = org
        org2.title = "test org"
        org2.save()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        submission2.for_coffee = True
        submission2.locations.add(office)
        submission2.departments.add(org2)
        submission2.is_active = True
        submission2.save()
        resp = self.client.get(reverse('mystery:mystery'))
        # still on the pending page -- no match was made
        self.assertContains(resp, "Cancel this", status_code=200)

    def test_non_matching_location(self):
        """ Verify registrations with different location do not register as a match. """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Cancel this", status_code=200)
        user2 = random_user()
        # a distinct office so the two submissions share no location
        office2 = OfficeLocation()
        office2.id = "test_id"
        office2.street = "test office"
        office2.city = "test office"
        office2.state = "test office"
        office2.zip = "test office"
        office2.save()
        submission2 = Interest()
        submission2.owner = user2
        submission2.is_active = False
        submission2.save()
        submission2.for_coffee = True
        submission2.locations.add(office2)
        submission2.departments.add(org)
        submission2.is_active = True
        submission2.save()
        resp = self.client.get(reverse('mystery:mystery'))
        # still on the pending page -- no match was made
        self.assertContains(resp, "Cancel this", status_code=200)

    def test_non_matching_active(self):
        """ Verify inactive registrations do not register as a match. """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office = OfficeLocation.objects.all()[0]
        org = OrgGroup.objects.filter(parent__isnull=True)[0]
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        submission1.save()
        submission1.locations.add(office)
        submission1.departments.add(org)
        resp = self.client.get(reverse('mystery:mystery'))
        self.assertContains(resp, "Cancel this", status_code=200)
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        # unlike the matching tests, is_active is never flipped back to True
        submission2.is_active = False
        submission2.save()
        submission2.for_coffee = True
        submission2.locations.add(office)
        submission2.departments.add(org)
        submission2.save()
        resp = self.client.get(reverse('mystery:mystery'))
        # still on the pending page -- no match was made
        self.assertContains(resp, "Cancel this", status_code=200)

    def test_interest_save(self):
        """ Test interest initial_save function """
        user1 = get_user_model().objects.get(username='test1@example.com')
        self.client.login(username='test1@example.com', password='1')
        office_list = OfficeLocation.objects.all()
        org_list = OrgGroup.objects.filter(parent__isnull=True)
        submission1 = Interest()
        submission1.owner = user1
        submission1.for_coffee = True
        # initial_save persists the Interest and attaches all M2M relations in one call
        submission1.initial_save(locations=office_list, departments=org_list)
        self.assertNotEqual(submission1.id, None)
        self.assertEqual(submission1.locations.count(), len(office_list))
        self.assertEqual(submission1.departments.count(), len(org_list))
        self.assertEqual(submission1.match, None)
        user2 = random_user()
        submission2 = Interest()
        submission2.owner = user2
        submission2.for_coffee = True
        submission2.save()
        submission2.locations.add(office_list[0])
        submission2.departments.add(org_list[0])
        submission2.save()
        self.assertNotEqual(submission2.id, None)
        self.assertEqual(submission2.locations.count(), 1)
        self.assertEqual(submission2.departments.count(), 1)
        # the second save should have matched the two submissions symmetrically
        self.assertEqual(submission2.match, submission1)
        submission1 = Interest.objects.get(id=submission1.id)  # refresh
        self.assertEqual(submission1.match, submission2)
| 39.167116
| 102
| 0.652192
| 1,583
| 14,531
| 5.87808
| 0.085281
| 0.036539
| 0.047286
| 0.054379
| 0.809242
| 0.785814
| 0.776357
| 0.770231
| 0.758732
| 0.740785
| 0
| 0.028112
| 0.233776
| 14,531
| 370
| 103
| 39.272973
| 0.807616
| 0.049618
| 0
| 0.758621
| 0
| 0
| 0.079718
| 0.006698
| 0
| 0
| 0
| 0
| 0.141379
| 1
| 0.041379
| false
| 0.041379
| 0.027586
| 0
| 0.075862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
871d7a2c25f91de4856ad7fe03c139bcb16fbff1
| 12,824
|
py
|
Python
|
arcos4py/.ipynb_checkpoints/summary-checkpoint.py
|
marc-rauckhorst/arcos-py
|
c195a29e47d4041e787eedb59552c4e92364627e
|
[
"MIT"
] | null | null | null |
arcos4py/.ipynb_checkpoints/summary-checkpoint.py
|
marc-rauckhorst/arcos-py
|
c195a29e47d4041e787eedb59552c4e92364627e
|
[
"MIT"
] | null | null | null |
arcos4py/.ipynb_checkpoints/summary-checkpoint.py
|
marc-rauckhorst/arcos-py
|
c195a29e47d4041e787eedb59552c4e92364627e
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Summary Functions\n",
"\n",
"def get_summ_combined_county_annual(state, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns seller details such as addresses\n",
"\n",
" >>>get_summ_combined_county_annual('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'combined_county_annual?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" combined_county_annual_df = json_normalize(requests.get(full_url).json())\n",
" return combined_county_annual_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
" \n",
"def get_summ_combined_county_monthly(state, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns seller details such as addresses\n",
"\n",
" >>>get_summ_combined_county_monthly('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'combined_county_monthly?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" combined_county_monthly_df = json_normalize(requests.get(full_url).json())\n",
" return combined_county_monthly_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_total_pharmacies_county(state, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns all pharmacy totals by county (Will be large and could take extra time to load)\n",
"\n",
" >>>get_summ_total_pharmacies_county('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'total_pharmacies_county?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" total_pharmacies_county_df = json_normalize(requests.get(full_url).json())\n",
" return total_pharmacies_county_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_total_manufacturers_county(state, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns all Manufacturer totals by county (Will be large and could take extra time to load)\n",
"\n",
" >>>get_summ_total_manufacturers_county('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'total_manufacturers_county?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" total_manufacturers_county_df = json_normalize(requests.get(full_url).json())\n",
" return total_manufacturers_county_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_total_distributors_county(state, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns all Distributor totals by county (Will be large and could take extra time to load)\n",
"\n",
" >>>get_summ_total_distributors_county('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'total_distributors_county?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" total_distributors_county_df = json_normalize(requests.get(full_url).json())\n",
" return total_distributors_county_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_total_pharmacies_state(state,verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns all pharmacy totals by state (Will be large and could take extra time to load)\n",
"\n",
" >>>get_summ_total_pharmacies_state('OH')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'total_pharmacies_state?'\n",
" add_state = 'state=' + state\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" total_pharmacies_state_df = json_normalize(requests.get(full_url).json())\n",
" return total_pharmacies_state_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_total_manufacturers_state(state,verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns all Manufacturer totals by state (Will be large and could take extra time to load) \n",
"\n",
"    >>>get_summ_total_manufacturers_state('OH')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'total_manufacturers_state?'\n",
" add_state = 'state=' + state\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" total_manufacturers_state_df = json_normalize(requests.get(full_url).json())\n",
" return total_manufacturers_state_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_total_distributors_state(state,verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns all Distributor totals by state (Will be large and could take extra time to load) \n",
"\n",
"    >>>get_summ_total_distributors_state('OH')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'total_distributors_state?'\n",
" add_state = 'state=' + state\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" total_distributors_state_df = json_normalize(requests.get(full_url).json())\n",
" return total_distributors_state_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL and state are correct: ', full_url)\n",
"\n",
"def get_summ_combined_buyer_annual(state, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
" Returns summarized annual dosages of pharmacies and practitioners by state and county \n",
"\n",
" >>>get_summ_combined_buyer_annual('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'combined_buyer_annual?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" combined_buyer_annual_df = json_normalize(requests.get(full_url).json())\n",
" return combined_buyer_annual_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" \n",
"def get_summ_combined_buyer_monthly(state, year, county = '',verification = True, key = 'WaPo'):\n",
" '''(str(two letter abbreviation), str, bool, str) -> pd.df\n",
"    Returns dosages by pharmacy or practitioner by county, state, and year\n",
"\n",
" >>>get_summ_combined_buyer_monthly('OH', 'Summit')\n",
" EXAMPLE OUTPUT\n",
" '''\n",
"\n",
" base_url = 'https://arcos-api.ext.nile.works/v1/'\n",
" function_url = 'combined_buyer_monthly?'\n",
" add_state = 'state=' + state\n",
" add_county = '&county=' + county\n",
" add_year = '&year=' + year\n",
" add_key = '&key=' + key\n",
" full_url = base_url + function_url + add_state + add_county + add_year + add_key\n",
"\n",
" if verification == True:\n",
" print(full_url)\n",
" combined_buyer_monthly_df = json_normalize(requests.get(full_url).json())\n",
" return combined_buyer_monthly_df\n",
" else:\n",
" print('Problem encountered, not returning data:')\n",
" print('Either verification == False')\n",
" print('Or problem with API encountered, please verify URL, state and county are correct: ', full_url)\n",
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 46.632727
| 118
| 0.564333
| 1,579
| 12,824
| 4.383787
| 0.082331
| 0.014736
| 0.023115
| 0.011557
| 0.934845
| 0.932101
| 0.925744
| 0.918376
| 0.918376
| 0.918376
| 0
| 0.002013
| 0.264036
| 12,824
| 274
| 119
| 46.80292
| 0.731405
| 0
| 0
| 0.649635
| 0
| 0.032847
| 0.828837
| 0.167109
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.145985
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
873413c87444f3251dd5321fc6be49af7248457a
| 6,183
|
py
|
Python
|
exponent/models.py
|
AthenaExplorer/xm_s_explorer
|
b8ccd57e7bd493f46c493a967ec22e42171a6091
|
[
"MIT"
] | null | null | null |
exponent/models.py
|
AthenaExplorer/xm_s_explorer
|
b8ccd57e7bd493f46c493a967ec22e42171a6091
|
[
"MIT"
] | null | null | null |
exponent/models.py
|
AthenaExplorer/xm_s_explorer
|
b8ccd57e7bd493f46c493a967ec22e42171a6091
|
[
"MIT"
] | null | null | null |
from django.db import models
class MinerBase(models.Model):
    """
    Miner hashpower -- base data.

    Daily snapshot of a miner's power, rewards, gas costs and sector
    counts (`day` is indexed for per-date queries).
    """
    miner_no = models.CharField("矿工号", max_length=128)  # miner number/ID
    total_power_v = models.DecimalField('总算力', max_digits=40, decimal_places=0, default=0)  # total power
    avg_reward_v = models.DecimalField('单T奖励', max_digits=8, decimal_places=4, default=0)  # reward per TiB
    power_increase = models.DecimalField('算力增长', max_digits=40, decimal_places=0, default=0)  # power growth
    create_gas = models.DecimalField('生产成本', max_digits=40, decimal_places=0, default=0)  # production cost
    keep_gas = models.DecimalField('维护成本', max_digits=40, decimal_places=0, default=0)  # maintenance cost
    section_all = models.IntegerField('扇区累计总数', default=0)  # cumulative total sector count
    section_fault = models.IntegerField('坏扇区数量', default=0)  # faulty sector count
    new_sector = models.IntegerField('新增扇区', default=0)  # newly added sectors
    block_reward = models.DecimalField('单日出块奖励', max_digits=34, decimal_places=0, default=0)  # daily block reward
    day = models.DateField('日期', db_index=True)  # snapshot date
    join_date = models.DateField("加入时间")  # date the miner joined
    create_time = models.DateTimeField('创建时间', auto_now_add=True)  # row creation timestamp
    objects = models.Manager()
    class Meta:
        ordering = ["-create_time"]  # newest rows first
class MinerIndex(models.Model):
    """
    Miner hashpower -- index; raw values of the evaluation metrics.

    Each `*_v` field holds a raw metric value, each `*_i` field the derived
    index/score for the same metric; `synthesize_i` is the combined score.
    """
    miner_type_choices = ((1, "大矿工"), (2, "小矿工"))  # big/small miner, split at total power > 10 PiB
    miner_no = models.CharField("矿工号", max_length=128)  # miner number/ID
    day = models.DateField('日期', db_index=True)  # metric date
    # Raw metric values
    avg_reward_v = models.DecimalField('单T收益', max_digits=15, decimal_places=10, default=0)  # reward per TiB
    total_power_v = models.DecimalField('总算力', max_digits=40, decimal_places=0, default=0)  # total power
    day_inc_rate_v = models.FloatField(verbose_name="单日算力增长率")  # single-day power growth rate
    avg_inc_rate_v = models.FloatField(verbose_name="历史日平均增长率")  # historical average daily growth rate
    create_gas_week_v = models.DecimalField('七日单T生产成本', max_digits=25, decimal_places=0, default=0)  # 7-day production cost per TiB
    keep_gas_week_v = models.DecimalField('七日单T维护成本', max_digits=25, decimal_places=0, default=0)  # 7-day maintenance cost per TiB
    section_fault_rate_v = models.DecimalField('七日错误扇区占比', max_digits=15, decimal_places=8, default=0)  # 7-day faulty-sector ratio
    power_increment_7day_v = models.DecimalField('七日算力平均增量', max_digits=40, decimal_places=0, default=0)  # 7-day average power increment
    # Index scores derived from the raw values above
    avg_reward_i = models.FloatField('单T收益', null=True)
    total_power_i = models.FloatField('总算力', null=True)
    day_inc_rate_i = models.FloatField(verbose_name="单日算力增长率", null=True)
    avg_inc_rate_i = models.FloatField(verbose_name="历史日平均增长率", null=True)
    create_gas_week_i = models.FloatField('七日单T生产成本', null=True)
    keep_gas_week_i = models.FloatField('七日单T维护成本', null=True)
    section_fault_rate_i = models.FloatField('七日错误扇区占比', null=True)
    power_increment_7day_i = models.FloatField('七日算力平均增量', null=True)
    synthesize_i = models.DecimalField("综合得分", null=True, max_digits=15, decimal_places=10, )  # combined score
    synthesize_rank = models.IntegerField("综合得分排名", null=True)  # rank by combined score
    miner_type = models.IntegerField("矿工类型", null=True, choices=miner_type_choices)  # miner type (see choices)
    create_time = models.DateTimeField('创建时间', auto_now_add=True)  # row creation timestamp
    objects = models.Manager()
    class Meta:
        ordering = ["-create_time"]  # newest rows first
class CompanyMinerIndex(models.Model):
    """
    Mining-company hashpower -- index; raw values of the evaluation metrics.

    Company-level counterpart of MinerIndex: `*_v` fields hold raw metric
    values, `*_i` fields the derived index/scores, `synthesize_i` the
    combined score.
    """
    miner_type_choices = ((1, "大矿工"), (2, "小矿工"))  # big/small miner; no split criterion defined yet
    company_name = models.CharField("矿商名称", max_length=128)  # company display name
    company_code = models.CharField("矿商编码,这个编码不会改变", max_length=128)  # stable company code (never changes)
    day = models.DateField('日期', db_index=True)  # metric date
    # Raw metric values
    avg_reward_v = models.DecimalField('单T收益', max_digits=15, decimal_places=10, default=0)  # reward per TiB
    total_power_v = models.DecimalField('总算力', max_digits=40, decimal_places=0, default=0)  # total power
    day_inc_rate_v = models.FloatField(verbose_name="单日算力增长率")  # single-day power growth rate
    avg_inc_rate_v = models.FloatField(verbose_name="历史日平均增长率")  # historical average daily growth rate
    create_gas_week_v = models.DecimalField('七日单T生产成本', max_digits=25, decimal_places=0, default=0)  # 7-day production cost per TiB
    keep_gas_week_v = models.DecimalField('七日单T维护成本', max_digits=25, decimal_places=0, default=0)  # 7-day maintenance cost per TiB
    section_fault_rate_v = models.DecimalField('七日错误扇区占比', max_digits=15, decimal_places=8, default=0)  # 7-day faulty-sector ratio
    power_increment_7day_v = models.DecimalField('七日算力平均增量', max_digits=40, decimal_places=0, default=0)  # 7-day average power increment
    # Index scores (trailing "4"/"2"/"1"/"none" notes kept from the original
    # author -- presumably per-metric weights; TODO confirm)
    avg_reward_i = models.FloatField('单T收益', null=True)  # 4
    total_power_i = models.FloatField('总算力', null=True)  # 4
    day_inc_rate_i = models.FloatField(verbose_name="单日算力增长率", null=True)  # none
    avg_inc_rate_i = models.FloatField(verbose_name="历史日平均增长率", null=True)  # none
    create_gas_week_i = models.FloatField('七日单T生产成本', null=True)  # none
    keep_gas_week_i = models.FloatField('七日单T维护成本', null=True)  # 2
    section_fault_rate_i = models.FloatField('七日错误扇区占比', null=True)  # 2
    power_increment_7day_i = models.FloatField('七日算力平均增量', null=True)  # 1
    synthesize_i = models.DecimalField("综合得分", null=True, max_digits=15, decimal_places=10, )  # combined score
    synthesize_rank = models.IntegerField("综合得分排名", null=True)  # rank by combined score
    miner_type = models.IntegerField("矿工类型", null=True, choices=miner_type_choices, default=1)  # miner type (see choices)
    create_time = models.DateTimeField('创建时间', auto_now_add=True)  # row creation timestamp
    objects = models.Manager()
    class Meta:
        ordering = ["-create_time"]  # newest rows first
class CompanyBase(models.Model):
    """
    Mining-company hashpower -- base data.

    Company-level counterpart of MinerBase: daily snapshot of power,
    rewards, gas costs and sector counts (`day` is indexed).
    """
    company_name = models.CharField("矿商名称", max_length=128)  # company display name
    company_code = models.CharField("矿商编码,这个编码不会改变", max_length=128, null=True)  # stable company code (never changes)
    total_power_v = models.DecimalField('总算力', max_digits=40, decimal_places=0, default=0)  # total power
    avg_reward_v = models.DecimalField('单T奖励', max_digits=12, decimal_places=6, default=0)  # reward per TiB
    power_increase = models.DecimalField('算力增长', max_digits=40, decimal_places=0, default=0)  # power growth
    create_gas = models.DecimalField('生产成本', max_digits=40, decimal_places=0, default=0)  # production cost
    keep_gas = models.DecimalField('维护成本', max_digits=40, decimal_places=0, default=0)  # maintenance cost
    section_all = models.IntegerField('扇区累计总数', default=0)  # cumulative total sector count
    section_fault = models.IntegerField('坏扇区数量', default=0)  # faulty sector count
    new_sector = models.IntegerField('新增扇区', default=0)  # newly added sectors
    block_reward = models.DecimalField('单日出块奖励', max_digits=34, decimal_places=0, default=0)  # daily block reward
    day = models.DateField('日期', db_index=True)  # snapshot date
    join_date = models.DateField("加入时间")  # date the company joined
    create_time = models.DateTimeField('创建时间', auto_now_add=True)  # row creation timestamp
    objects = models.Manager()
    class Meta:
        ordering = ["-create_time"]  # newest rows first
| 50.268293
| 104
| 0.724082
| 843
| 6,183
| 5.052195
| 0.139976
| 0.056351
| 0.059169
| 0.088753
| 0.948814
| 0.948814
| 0.948814
| 0.948814
| 0.916412
| 0.811693
| 0
| 0.027369
| 0.143134
| 6,183
| 122
| 105
| 50.680328
| 0.776519
| 0.016982
| 0
| 0.879121
| 0
| 0
| 0.075949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010989
| 0
| 0.956044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
5e43fd72c6419e0d862824ae1f134e2a6bc4ef79
| 48,752
|
py
|
Python
|
_serverApp/_emailServices.py
|
leandrou-technology-forward/ganimides_api_server
|
8787927e2cf7568a070c1c65294ee76d89177908
|
[
"MIT"
] | null | null | null |
_serverApp/_emailServices.py
|
leandrou-technology-forward/ganimides_api_server
|
8787927e2cf7568a070c1c65294ee76d89177908
|
[
"MIT"
] | 1
|
2021-06-02T00:36:03.000Z
|
2021-06-02T00:36:03.000Z
|
_serverApp/_emailServices.py
|
leandrou-technology-forward/ganimides_api_server
|
8787927e2cf7568a070c1c65294ee76d89177908
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
if not (os.path.dirname(__file__) in sys.path): sys.path.append(os.path.dirname(__file__))
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from mailjet_rest import Client
import _appEnvironment as thisApp
from _utilities import string_translate
from _processServices import set_process_identity_dict, set_process_caller_area,build_process_signature, build_process_call_area
from _debugServices import get_debug_option_as_level,get_debug_files,get_debug_level
from _logProcessServices import log_process_start, log_process_finish, log_process_message, log_process_result,log_process_data, log_process_input, log_process_output,log_process_parameter
from _moduleConfigServices import retrieve_module_configuration
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Module identity: program name is the file's base name without extension.
module_ProgramName = os.path.splitext(os.path.basename(__file__))[0]
module_id = str(module_ProgramName)
module_version = 0.1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
#import sendgrid
#from sendgrid.helpers.mail import *
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def get_template(template,application_name=''): #under construction
    """Resolve an email template name to (subject, text, html).

    Stub implementation: always returns three empty strings regardless of
    the requested template or application name.
    """
    return ('', '', '')
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def send_email(From='', To='', Cc='', Bcc='', Subject='', text_body='', html_body='', email_template='', data_record=None, attachments=None, application_name='', language='En', caller_area={}):
    """
    send_email (wrapper)

    Validates inputs, optionally resolves an email template, substitutes
    '#'-placeholders from `data_record`, then dispatches the message through
    the provider/method selected by application configuration
    (MAIL_SERVER_PROVIDER / MAIL_SEND_METHOD): mailjet, sendgrid or plain SMTP.

    Parameters: `From` falls back to the configured `mail_sender`;
    `attachments` is forwarded to the concrete sender; `language` is
    currently accepted but unused.

    Returns a dict {'api_status': 'success'|'error', 'api_message': str}.
    """
    # Normalize mutable defaults: `={}` / `=[]` defaults are shared across
    # calls in Python, so use None sentinels instead.
    data_record = {} if data_record is None else data_record
    attachments = [] if attachments is None else attachments
    caller_area = {} if caller_area is None else caller_area
    _process_name = 'send_email'
    _process_entity = 'email'
    _process_action = 'send_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging boilerplate used across this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID,**_process_call_area)
    log_process_input('', 'From', From,**_process_call_area)
    log_process_input('', 'To', To,**_process_call_area)
    log_process_input('', 'Cc', Cc,**_process_call_area)
    log_process_input('', 'Bcc', Bcc,**_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'email_template', email_template, **_process_call_area)
    log_process_input('', 'application_name', application_name, **_process_call_area)
    log_process_input('', 'attachments', attachments, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    # Fall back to the application-configured default sender.
    if not From:
        From = thisApp.application_configuration.get('mail_sender')
        log_process_data('', 'From', From,**_process_call_area)
    if not From:
        msg = 'mail sender not defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if not To:
        msg = 'email recipient not defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if not Subject:
        msg = 'email Subject not defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if not text_body and not html_body and not email_template:
        # Deliberately only a warning (no return): sending proceeds even
        # with empty content, matching the original behavior.
        msg = 'no body or template defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_message('', 'warning', msg,**_process_call_area)
    else:
        if email_template:
            # Template overrides subject and bodies when it resolves.
            (t1, t2, t3) = get_template(email_template,application_name)
            if t1 or t2 or t3:
                Subject = t1
                text_body = t2
                html_body = t3
            else:
                msg = f'email template {email_template} not found'
                api_result = {'api_status': 'error', 'api_message': msg}
                log_process_finish(_process_msgID, api_result, **_process_call_area)
                return api_result
    # '#' marks placeholders; substitute them from data_record.
    if text_body.find('#') >= 0:
        text_body = string_translate(text_body, data_record)
        log_process_data('', 'translated text_body', text_body,**_process_call_area)
    if html_body.find('#') >= 0:
        html_body = string_translate(html_body, data_record)
        log_process_data('', 'translated html_body', html_body,**_process_call_area)
    if not text_body and not html_body:
        msg = 'content build FAILED'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if Subject.find('#') >= 0:
        Subject = string_translate(Subject, data_record)
        log_process_data('', 'translated Subject', Subject,**_process_call_area)
    # Mail transport configuration.
    MAIL_SERVER_PROVIDER = thisApp.application_configuration.get('MAIL_SERVER_PROVIDER')
    MAIL_SERVER = thisApp.application_configuration.get('MAIL_SERVER')
    MAIL_PORT = thisApp.application_configuration.get('MAIL_PORT')
    MAIL_USE_TLS = thisApp.application_configuration.get('MAIL_USE_TLS')
    MAIL_USE_SSL = thisApp.application_configuration.get('MAIL_USE_SSL')
    MAIL_USERNAME = thisApp.application_configuration.get('MAIL_USERNAME')
    MAIL_PASSWORD = thisApp.application_configuration.get('MAIL_PASSWORD')
    MAIL_APIKEY_PUBLIC = thisApp.application_configuration.get('MAIL_APIKEY_PUBLIC')
    MAIL_APIKEY_PRIVATE = thisApp.application_configuration.get('MAIL_APIKEY_PRIVATE')
    MAIL_SEND_METHOD = thisApp.application_configuration.get('MAIL_SEND_METHOD')
    log_process_parameter('', 'config param', 'MAIL_SERVER', MAIL_SERVER, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_SEND_METHOD', MAIL_SEND_METHOD, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_PORT', MAIL_PORT, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_USE_TLS', MAIL_USE_TLS, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_USE_SSL', MAIL_USE_SSL, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_USERNAME', MAIL_USERNAME, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_PASSWORD', MAIL_PASSWORD, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_APIKEY_PUBLIC', MAIL_APIKEY_PUBLIC, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_APIKEY_PRIVATE', MAIL_APIKEY_PRIVATE, **_process_call_area)
    try:
        # Provider/method dispatch. NOTE(review): a missing
        # MAIL_SERVER_PROVIDER (None) makes .upper() raise here and is
        # reported as a send failure by the except clause below.
        if MAIL_SERVER_PROVIDER.upper() == 'MAILJET':
            if MAIL_SEND_METHOD.upper() == 'SMTP':
                send_result = sendEmail_using_SMTP(From, To, Cc, Bcc, Subject, text_body, html_body, attachments, caller_area=_process_call_area)
            else:
                send_result = sendEmail_thru_mailjet(From, To, Cc, Bcc, Subject, text_body, html_body, attachments, caller_area=_process_call_area)
        elif MAIL_SERVER_PROVIDER == 'YANDEX':
            if MAIL_SEND_METHOD == 'SMTP':
                send_result = sendEmail_using_SMTP(From, To, Cc, Bcc, Subject, text_body, html_body, attachments, caller_area=_process_call_area)
            else:
                send_result = sendEmail_thru_sendgrid(From, To, Cc, Bcc, Subject, text_body, html_body, attachments, caller_area=_process_call_area)
        else:
            # Unknown provider: fall back to plain SMTP.
            send_result = sendEmail_using_SMTP(From, To, Cc, Bcc, Subject, text_body, html_body, attachments, caller_area=_process_call_area)
    except Exception as error_text:
        msg = f'email send failed. system error:{error_text}'
        log_process_message('', 'error', msg,**_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if send_result.get('api_status') == 'success':
        msg = f'OK. email send To [{To}] with Subject [[{Subject}]]'
        api_result = {'api_status': 'success', 'api_message': msg}
    else:
        # Propagate the concrete sender's error result unchanged.
        api_result = send_result
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def send_outlook_email(To='', Cc='', Bcc='', Subject='', text_body='', html_body='', email_template='', data_record=None, attachments=None, application_name='', caller_area={}):
    """
    Compose an email in a local Outlook instance (via COM) and display it.

    Resolves an optional template, substitutes '#'-placeholders from
    `data_record`, then opens the draft in Outlook with `mail.display()` --
    the message is shown to the user, NOT sent automatically.
    Windows-only: requires the `win32com` (pywin32) package and Outlook.

    Returns a dict {'api_status': 'success'|'error', 'api_message': str}.
    """
    # Normalize mutable defaults (avoid shared `={}` / `=[]` state).
    data_record = {} if data_record is None else data_record
    attachments = [] if attachments is None else attachments
    caller_area = {} if caller_area is None else caller_area
    _process_name = 'send_outlook_email'
    _process_entity = 'email'
    _process_action = 'send_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging boilerplate used across this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID,**_process_call_area)
    log_process_input('', 'To', To,**_process_call_area)
    log_process_input('', 'Cc', Cc,**_process_call_area)
    log_process_input('', 'Bcc', Bcc,**_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'email_template', email_template, **_process_call_area)
    log_process_input('', 'application_name', application_name, **_process_call_area)
    log_process_input('', 'attachments', attachments, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    msg = 'start sending email thru outlook'
    log_process_message('', '', msg,**_process_call_area)
    # Imported lazily: win32com is Windows-only and must not break the
    # module import on other platforms.
    import win32com.client as win32
    if not To:
        msg = 'email recipient not defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if not Subject:
        msg = 'email Subject not defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if not text_body and not html_body and not email_template:
        # Deliberately only a warning (no return): composing proceeds even
        # with empty content, matching the original behavior.
        msg = 'no body or template defined'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_message('', 'warning', msg,**_process_call_area)
    else:
        if email_template:
            # Template overrides subject and bodies when it resolves.
            (t1, t2, t3) = get_template(email_template,application_name)
            if t1 or t2 or t3:
                Subject = t1
                text_body = t2
                html_body = t3
            else:
                msg = f'email template {email_template} not found'
                api_result = {'api_status': 'error', 'api_message': msg}
                log_process_finish(_process_msgID, api_result, **_process_call_area)
                return api_result
    # '#' marks placeholders; substitute them from data_record.
    if text_body.find('#') >= 0:
        text_body = string_translate(text_body, data_record)
        log_process_data('', 'translated text_body', text_body,**_process_call_area)
    if html_body.find('#') >= 0:
        html_body = string_translate(html_body, data_record)
        log_process_data('', 'translated html_body', html_body,**_process_call_area)
    if not text_body and not html_body:
        msg = 'content build FAILED'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    if Subject.find('#') >= 0:
        Subject = string_translate(Subject, data_record)
        log_process_data('', 'translated Subject', Subject,**_process_call_area)
    try:
        outlook = win32.Dispatch('outlook.application')
        mail = outlook.CreateItem(0)  # 0 == olMailItem
        mail.To = To
        mail.Subject = Subject
        if Cc:
            mail.Cc = Cc
        if text_body:
            mail.Body = text_body
        if html_body:
            mail.HTMLBody = html_body
        # Attach any provided files (falsy entries are skipped).
        for attachment_file in attachments:
            if attachment_file:
                mail.Attachments.Add(attachment_file)
        # Display the draft instead of sending it (mail.Send() would send).
        mail.display()
        msg = f'OK. email send To [{To}] with Subject [[{Subject}]]'
        api_result = {'api_status': 'success', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    except Exception as error_text:
        msg = f'sending email thru outlook system error:{error_text}'
        log_process_message('', 'error', msg,**_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def sendEmail_using_SMTP(From, To, Cc, Bcc, Subject, text_body, html_body, attachments=None, caller_area={}):
    """
    sendEmail_using_SMTP

    Builds a MIME message (via MIME_email_message) and sends it through the
    configured SMTP server using EHLO + STARTTLS + login.

    NOTE(review): `attachments` is accepted but never attached in this
    function -- confirm whether MIME_email_message is meant to handle it.

    Returns a dict {'api_status': 'success'|'error', 'api_message': str}.
    """
    # Normalize mutable defaults (avoid shared `=[]` / `={}` state).
    attachments = [] if attachments is None else attachments
    caller_area = {} if caller_area is None else caller_area
    _process_name = 'sendEmail_using_SMTP'
    _process_entity = 'email'
    _process_action = 'send_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging boilerplate used across this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID,**_process_call_area)
    log_process_input('', 'From', From,**_process_call_area)
    log_process_input('', 'To', To,**_process_call_area)
    log_process_input('', 'Cc', Cc,**_process_call_area)
    log_process_input('', 'Bcc', Bcc,**_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'attachments', attachments, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    # SMTP transport configuration.
    MAIL_SERVER = thisApp.application_configuration.get('MAIL_SERVER')
    MAIL_PORT = thisApp.application_configuration.get('MAIL_PORT')
    MAIL_USERNAME = thisApp.application_configuration.get('MAIL_USERNAME')
    MAIL_PASSWORD = thisApp.application_configuration.get('MAIL_PASSWORD')
    log_process_parameter('', 'config param', 'MAIL_SERVER', MAIL_SERVER, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_PORT', MAIL_PORT, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_USERNAME', MAIL_USERNAME, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_PASSWORD', MAIL_PASSWORD, **_process_call_area)
    try:
        email_message = MIME_email_message(From, To, Cc, Bcc, Subject, text_body, html_body, caller_area=_process_call_area)
        if not email_message:
            msg = 'can not format email message'
            api_result = {'api_status': 'error', 'api_message': msg}
            log_process_finish(_process_msgID, api_result, **_process_call_area)
            return api_result
    except Exception as error_text:
        msg = f'can not format email message. system error:{error_text}'
        log_process_message('', 'error', msg,**_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    try:
        msg = 'start sending email using SMTP method'
        log_process_message('', '', msg,**_process_call_area)
        # Context manager guarantees QUIT/close even when starttls/login/
        # sendmail raises (the original leaked the connection on failure).
        with smtplib.SMTP(MAIL_SERVER, MAIL_PORT) as mail:
            mail.ehlo()
            mail.starttls()
            mail.login(MAIL_USERNAME, MAIL_PASSWORD)
            mail.sendmail(From, To, email_message.as_string())
        msg = 'email sent using SMTP method'
        log_process_message('', 'success', msg,**_process_call_area)
    except Exception as error_text:
        msg = f'sending email system error:{error_text}'
        log_process_message('', 'error', msg,**_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    msg = f'email send To [{To}] with Subject [[{Subject}]]'
    api_result = {'api_status': 'success', 'api_message': msg}
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def sendEmail_thru_google(From, To, Cc, Bcc, Subject, text_body, html_body, attachments=None, caller_area={}):
    """
    sendEmail_thru_google

    Builds a MIME message (via MIME_email_message) and sends it through the
    configured SMTP server (intended for Gmail) using EHLO + STARTTLS + login.
    Identical transport logic to sendEmail_using_SMTP.

    NOTE(review): `attachments` is accepted but never attached in this
    function -- confirm whether MIME_email_message is meant to handle it.

    Returns a dict {'api_status': 'success'|'error', 'api_message': str}.
    """
    # Normalize mutable defaults (avoid shared `=[]` / `={}` state).
    attachments = [] if attachments is None else attachments
    caller_area = {} if caller_area is None else caller_area
    _process_name = 'sendEmail_thru_google'
    _process_entity = 'email'
    _process_action = 'send_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging boilerplate used across this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID,**_process_call_area)
    log_process_input('', 'From', From,**_process_call_area)
    log_process_input('', 'To', To,**_process_call_area)
    log_process_input('', 'Cc', Cc,**_process_call_area)
    log_process_input('', 'Bcc', Bcc,**_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'attachments', attachments, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    # SMTP transport configuration.
    MAIL_SERVER = thisApp.application_configuration.get('MAIL_SERVER')
    MAIL_PORT = thisApp.application_configuration.get('MAIL_PORT')
    MAIL_USERNAME = thisApp.application_configuration.get('MAIL_USERNAME')
    MAIL_PASSWORD = thisApp.application_configuration.get('MAIL_PASSWORD')
    log_process_parameter('', 'config param', 'MAIL_SERVER', MAIL_SERVER, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_PORT', MAIL_PORT, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_USERNAME', MAIL_USERNAME, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_PASSWORD', MAIL_PASSWORD, **_process_call_area)
    try:
        email_message = MIME_email_message(From, To, Cc, Bcc, Subject, text_body, html_body, caller_area=_process_call_area)
        if not email_message:
            msg = 'can not format email message'
            api_result = {'api_status': 'error', 'api_message': msg}
            log_process_finish(_process_msgID, api_result, **_process_call_area)
            return api_result
    except Exception as error_text:
        msg = f'can not format email message. system error:{error_text}'
        log_process_message('', 'error', msg,**_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    try:
        msg = 'start sending email thru google'
        log_process_message('', '', msg,**_process_call_area)
        # Context manager guarantees QUIT/close even when starttls/login/
        # sendmail raises (the original leaked the connection on failure).
        # Commented-out hard-coded credentials removed (security).
        with smtplib.SMTP(MAIL_SERVER, MAIL_PORT) as mail:
            mail.ehlo()
            mail.starttls()
            mail.login(MAIL_USERNAME, MAIL_PASSWORD)
            mail.sendmail(From, To, email_message.as_string())
        msg = 'email sent thru google'
        log_process_message('', 'success', msg,**_process_call_area)
    except Exception as error_text:
        msg = f'sending email system error:{error_text}'
        log_process_message('', 'error', msg,**_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    msg = f'email send To [{To}] with Subject [[{Subject}]]'
    api_result = {'api_status': 'success', 'api_message': msg}
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def sendEmail_thru_mailjet(From, To, Cc, Bcc, Subject, text_body, html_body, attachments=[], caller_area={}):
    """Send an email through the Mailjet SMTP relay.

    Builds a MIME message via MIME_email_message(), then delivers it over a
    STARTTLS SMTP session using the MAIL_* application configuration values.
    Returns an api_result dict with 'api_status' ('success'/'error') and
    'api_message'.
    """
    _process_name = 'sendEmail_thru_mailjet'
    _process_msgID = f'process:[{_process_name}]'
    # Identity / adapter / debug scaffolding for the process-logging framework.
    _process_identity_kwargs = {
        'type': 'process',
        'module': module_id,
        'name': _process_name,
        'action': 'send_email',
        'entity': 'email',
        'msgID': _process_msgID,
    }
    _process_adapters_kwargs = {'dbsession': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_signature = build_process_signature(
        **_process_identity_kwargs,
        **_process_adapters_kwargs,
        debug_level=_process_debug_level,
        debug_files=_process_debug_files,
        indent_method='AUTO',
        indent_level=None,
    )
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID, **_process_call_area)
    # Log every input in declaration order.
    for _input_name, _input_value in (
        ('From', From), ('To', To), ('Cc', Cc), ('Bcc', Bcc),
        ('Subject', Subject), ('text_body', text_body), ('html_body', html_body),
        ('attachments', attachments), ('caller_area', caller_area),
    ):
        log_process_input('', _input_name, _input_value, **_input_log_area) if False else log_process_input('', _input_name, _input_value, **_process_call_area)
    # SMTP connection settings come from the application configuration.
    MAIL_SERVER = thisApp.application_configuration.get('MAIL_SERVER')
    MAIL_PORT = thisApp.application_configuration.get('MAIL_PORT')
    MAIL_USERNAME = thisApp.application_configuration.get('MAIL_USERNAME')
    MAIL_PASSWORD = thisApp.application_configuration.get('MAIL_PASSWORD')
    for _param_name, _param_value in (
        ('MAIL_SERVER', MAIL_SERVER), ('MAIL_PORT', MAIL_PORT),
        ('MAIL_USERNAME', MAIL_USERNAME), ('MAIL_PASSWORD', MAIL_PASSWORD),
    ):
        log_process_parameter('', 'config param', _param_name, _param_value, **_process_call_area)
    # Format the message; a falsy result or an exception is an error return.
    try:
        email_message = MIME_email_message(From, To, Cc, Bcc, Subject, text_body, html_body, caller_area=_process_call_area)
        if not email_message:
            msg = 'can not format email message'
            api_result = {'api_status': 'error', 'api_message': msg}
            log_process_finish(_process_msgID, api_result, **_process_call_area)
            return api_result
    except Exception as error_text:
        msg = f'can not format email message. system error:{error_text}'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    # Deliver over SMTP with STARTTLS.
    try:
        msg = 'start sending email thru mailjet'
        log_process_message('', '', msg, **_process_call_area)
        smtp_session = smtplib.SMTP(MAIL_SERVER, MAIL_PORT)
        smtp_session.ehlo()
        smtp_session.starttls()
        smtp_session.login(MAIL_USERNAME, MAIL_PASSWORD)
        smtp_session.sendmail(From, To, email_message.as_string())
        smtp_session.quit()
        msg = 'email sent thru mailjet'
        log_process_message('', 'success', msg, **_process_call_area)
    except Exception as error_text:
        msg = f'sending email system error:{error_text}'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    msg = f'email send To [{To}] with Subject [[{Subject}]]'
    api_result = {'api_status': 'success', 'api_message': msg}
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def sendEmail_thru_sendgrid(From, To, Cc, Bcc, Subject, text_body, html_body, attachments=[], caller_area={}):
    """Send an email through the SendGrid Web API.

    Fixes over the previous version:
    - the SendGrid API key is read from the application configuration
      (SENDGRID_API_KEY) instead of being hard-coded in source — the old
      embedded secret must be rotated;
    - the caller's `From` address is honoured instead of being overwritten;
    - `text_body` is sent as the message content instead of hard-coded
      demo text.

    Returns an api_result dict with 'api_status' and 'api_message'.
    """
    _process_name = 'sendEmail_thru_sendgrid'
    _process_entity = 'email'
    _process_action = 'send_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging scaffolding used by every API in this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID, **_process_call_area)
    log_process_input('', 'From', From, **_process_call_area)
    log_process_input('', 'To', To, **_process_call_area)
    log_process_input('', 'Cc', Cc, **_process_call_area)
    log_process_input('', 'Bcc', Bcc, **_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'attachments', attachments, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    # Credentials come from configuration — never hard-code API secrets.
    SENDGRID_API_KEY = thisApp.application_configuration.get('SENDGRID_API_KEY')
    log_process_parameter('', 'config param', 'SENDGRID_API_KEY', '***' if SENDGRID_API_KEY else None, **_process_call_area)
    if not SENDGRID_API_KEY:
        msg = 'SENDGRID_API_KEY is not configured'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    try:
        msg = 'start sending email thru sendgrid'
        log_process_message('', '', msg, **_process_call_area)
        sg = sendgrid.SendGridAPIClient(apikey=SENDGRID_API_KEY)
        from_email = Email(From)
        to_email = Email(To)
        # SendGrid requires at least one content part; fall back to an
        # empty plain-text part when no text body was supplied.
        content = Content("text/plain", text_body or '')
        mail = Mail(from_email, Subject, to_email, content)
        response = sg.client.mail.send.post(request_body=mail.get())
        log_process_data('', 'response.status_code', response.status_code, **_process_call_area)
        log_process_data('', 'response.body', response.body, **_process_call_area)
        log_process_data('', 'response.headers', response.headers, **_process_call_area)
        msg = 'email sent thru sendgrid'
        log_process_message('', 'success', msg, **_process_call_area)
    except Exception as error_text:
        msg = f'sending email system error:{error_text}'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    msg = f'email send To [{To}] with Subject [[{Subject}]]'
    api_result = {'api_status': 'success', 'api_message': msg}
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def sendEmail_thru_mailjet_api(From, To, Cc, Bcc, Subject, text_body, html_body, attachments=[], caller_area={}):
    """Send an email through the Mailjet REST API (v3 Send endpoint).

    Fixes over the previous version: the unused `data1` payload is removed,
    and the message body is built from the caller's `text_body`/`html_body`
    instead of hard-coded Mailjet template demo text (which silently ignored
    the supplied content).

    Authenticates with MAIL_APIKEY_PUBLIC / MAIL_APIKEY_PRIVATE from the
    application configuration. Returns an api_result dict with 'api_status'
    and 'api_message'.
    """
    _process_name = 'sendEmail_thru_mailjet_api'
    _process_entity = 'email'
    _process_action = 'send_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging scaffolding used by every API in this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID, **_process_call_area)
    log_process_input('', 'From', From, **_process_call_area)
    log_process_input('', 'To', To, **_process_call_area)
    log_process_input('', 'Cc', Cc, **_process_call_area)
    log_process_input('', 'Bcc', Bcc, **_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'attachments', attachments, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    MAIL_APIKEY_PUBLIC = thisApp.application_configuration.get('MAIL_APIKEY_PUBLIC')
    MAIL_APIKEY_PRIVATE = thisApp.application_configuration.get('MAIL_APIKEY_PRIVATE')
    log_process_parameter('', 'config param', 'MAIL_APIKEY_PUBLIC', MAIL_APIKEY_PUBLIC, **_process_call_area)
    log_process_parameter('', 'config param', 'MAIL_APIKEY_PRIVATE', MAIL_APIKEY_PRIVATE, **_process_call_area)
    msg = 'start sending email thru mailjet_api'
    log_process_message('', '', msg, **_process_call_area)
    try:
        # Client authenticates lazily, but construction can still fail on
        # bad arguments, so keep it inside the try.
        mailjet = Client(auth=(MAIL_APIKEY_PUBLIC, MAIL_APIKEY_PRIVATE))
        msg = f'mailjet_api CONNECT OK'
        log_process_message('', 'success', msg, **_process_call_area)
    except Exception as error_text:
        msg = f'mailjet_api ERROR api authorization failed: {error_text}'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    # Build the v3 Send payload from the caller's actual content; include
    # only the body parts that were supplied.
    message_payload = {
        "From": {"Email": From},
        "To": [{"Email": To}],
        "Subject": Subject,
    }
    if text_body:
        message_payload["TextPart"] = text_body
    if html_body:
        message_payload["HTMLPart"] = html_body
    data = {'Messages': [message_payload]}
    log_process_data('', 'email_data', data, **_process_call_area)
    try:
        result = mailjet.send.create(data=data)
        log_process_data('', 'result.status_code', result.status_code, **_process_call_area)
        log_process_data('', 'result.json', str(result.json()), **_process_call_area)
        msg = 'email sent thru mailjet_api'
        log_process_message('', 'success', msg, **_process_call_area)
    except Exception as error_text:
        msg = f'send email thru mailjet_api system error: {error_text}'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_process_msgID, api_result, **_process_call_area)
        return api_result
    msg = f'email send To [{To}] with Subject [[{Subject}]]'
    api_result = {'api_status': 'success', 'api_message': msg}
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def MIME_email_message(From, To, Cc, Bcc, Subject, text_body, html_body, caller_area={}):
    """Build a multipart/alternative MIME message from the given parts.

    Returns the MIMEMultipart object (NOT an api_result dict — callers
    truth-test the return value). Per RFC 2046 the last attached part of a
    multipart/alternative message is the preferred one, so the HTML part is
    attached after the plain-text part.

    Fixes over the previous version: the duplicate (dead) `api_result`
    assignment is removed, and Cc/Bcc headers are only set when a value was
    supplied — assigning None to a header breaks later serialization via
    as_string().
    """
    _process_name = 'MIME_email_message'
    _process_entity = 'email'
    _process_action = 'format_email'
    _process_msgID = f'process:[{_process_name}]'
    # Standard process-logging scaffolding used by every API in this module.
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_process_msgID, **_process_call_area)
    log_process_input('', 'From', From, **_process_call_area)
    log_process_input('', 'To', To, **_process_call_area)
    log_process_input('', 'Cc', Cc, **_process_call_area)
    log_process_input('', 'Bcc', Bcc, **_process_call_area)
    log_process_input('', 'Subject', Subject, **_process_call_area)
    log_process_input('', 'text_body', text_body, **_process_call_area)
    log_process_input('', 'html_body', html_body, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    MIME_msg = MIMEMultipart('alternative')
    MIME_msg['Subject'] = Subject
    MIME_msg['From'] = From
    MIME_msg['To'] = To
    # Optional headers: only set when provided.
    if Cc:
        MIME_msg['Cc'] = Cc
    if Bcc:
        MIME_msg['Bcc'] = Bcc
    # Attach text first, then HTML (HTML becomes the preferred alternative).
    if text_body:
        part1 = MIMEText(text_body, 'plain')
        MIME_msg.attach(part1)
    if html_body:
        part2 = MIMEText(html_body, 'html', 'utf8')
        MIME_msg.attach(part2)
    msg = f'OK. email formatted according To MIME'
    log_process_message('', 'success', msg, **_process_call_area)
    api_result = {'api_status': 'success', 'api_message': msg}
    log_process_finish(_process_msgID, api_result, **_process_call_area)
    return MIME_msg
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# module initialization
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Derive the module identity from this file's own name (without extension).
module_ProgramName = os.path.splitext(os.path.basename(__file__))[0]
module_id = '{}'.format(module_ProgramName)
module_version = 0.1
# Identity record handed to the configuration loader below.
module_identityDictionary = {
    'module_ProgramName':module_ProgramName,
    'module_id':module_id,
    'module_version':module_version,
    'module_is_externally_configurable':False,
}
module_configuration = {}
# Load any persisted configuration for this module; console/file logging is
# driven by the application-level DEBUG_ON / FILELOG_ON flags.
module_configuration = retrieve_module_configuration(__file__, module_identityDictionary, module_configuration, print_enabled=thisApp.DEBUG_ON, filelog_enabled=thisApp.FILELOG_ON, handle_as_init=False)
msg = f'module [{module_id}] [[version {module_version}]] loaded.'
# Announce the module load only when a debug level is configured for it.
if thisApp.get_module_debug_level(module_id):
    print_message(msg)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#(print_enabled, filelog_enabled, log_file, errors_file,consolelog_enabled)=get_globals_from_configuration(module_configuration)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#module_configuration = add_methods_To_configuration('database_actions', module_configuration, leandroutechnologyforward_database_session_class, ['ALL'], ['_init_'])
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# methods == collect_method_names_from_class(leandroutechnologyforward_database_session_class, methods_ids=['ALL'])
# print(methods)
# exit(0)
# module_configuration = add_apis_To_configuration('database_actions', module_configuration, thisModuleObj, functions_ids, exclude_functions_ids)
#save_module_configuration(module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled)
#thisApp.pair_module_configuration('database_actions',module_configuration)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# main
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if __name__ == '__main__':
    # Ad-hoc tests / research calls — not an automated test suite.
    print(__file__)
    client = {'name': 'PHILIPPOS', 'mobile': '+35799359864'}
    # string_translate presumably substitutes #NAME#/#TODAY#-style
    # placeholders from the data record — confirm against its definition.
    print(string_translate('hello #NAME#, Today is #TODAY#', client))
    # debug_level 99 = maximum verbosity; 0 = quiet.
    print(send_email(From='noreply@leandrou.com', To='philippos.leandrou@gmail.com', Subject='#NAME# test from gani', text_body='hello #NAME#, Today is #TODAY#', data_record=client, caller_area={'debug_level': 99}))
    print(send_email(From='noreply@leandrou.com',To='philippos.leandrou@gmail.com', Subject='hi #MOBILE#, this is a test from gani', text_body='hello #NAME#, Today is #TODAY#', data_record=client, caller_area={'debug_level': 0}))
    print(send_outlook_email(To='philippos.leandrou@gmail.com', Subject='#NAME# test from gani', text_body='hello #NAME#, Today is #TODAY#', data_record=client, caller_area={'debug_level': 99}))
| 55.526196
| 229
| 0.632036
| 5,368
| 48,752
| 5.245343
| 0.060171
| 0.080477
| 0.109742
| 0.062009
| 0.835671
| 0.826295
| 0.811095
| 0.800618
| 0.786838
| 0.78627
| 0
| 0.002649
| 0.163706
| 48,752
| 877
| 230
| 55.58951
| 0.687964
| 0.169019
| 0
| 0.7456
| 0
| 0
| 0.170393
| 0.012508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0144
| false
| 0.0224
| 0.0208
| 0
| 0.0864
| 0.0112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e7bea062c2fb5bec261c62665d8cdc42e6a20ce
| 20,979
|
py
|
Python
|
wallet/views.py
|
reeshabhkumarranjan/SocPay
|
ba3f3ea0b7b814e1ca40293b14f192b6d40adbbd
|
[
"MIT"
] | null | null | null |
wallet/views.py
|
reeshabhkumarranjan/SocPay
|
ba3f3ea0b7b814e1ca40293b14f192b6d40adbbd
|
[
"MIT"
] | null | null | null |
wallet/views.py
|
reeshabhkumarranjan/SocPay
|
ba3f3ea0b7b814e1ca40293b14f192b6d40adbbd
|
[
"MIT"
] | null | null | null |
import pyotp
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
# Create your views here.
from django.urls import reverse
from main_app import utils
from main_app.utils import get_friends, are_friend
from users.models import CustomUser
# from wallet.forms import transaction_form
from wallet.models import Transaction
from datetime import datetime
from .utils import getOTP
import django
def wallet_home(request):
    """Render the wallet dashboard for the authenticated user."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    current_user = request.user
    context = {
        'name': current_user.username,
        'bal': current_user.user_balance,
        'trans': current_user.user_no_of_transactions,
    }
    return render(request, 'wallet.html', context=context)
def transactions_to_be_accepted(request):
    """List incoming transactions still awaiting this user's acceptance.

    Fixes: removed the dead `trans_list = []` initializer (immediately
    overwritten) and collapsed the queryset intersection
    (`filter(a) & filter(b)`) into a single filter() call, which expresses
    the same AND condition in one query.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    user1 = request.user
    trans_list = Transaction.objects.filter(
        transaction_accepted=False,
        transaction_user_2=user1,
    )
    d = {'transactions': trans_list}
    return render(request, 'transactions_list.html', context=d)
def transactions_completed(request):
    """List accepted transactions in which this user was sender or receiver.

    Fixes: removed the dead `trans_list = []` initializer and replaced the
    queryset algebra (`filter(...) & (filter(...) | filter(...))`) with a
    single filter() using a Q-object OR — same result set, one queryset.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    user1 = request.user
    trans_list = Transaction.objects.filter(
        Q(transaction_user_1=user1) | Q(transaction_user_2=user1),
        transaction_accepted=True,
    )
    d = {'trans_list': trans_list}
    return render(request, 'transactions_completed.html', context=d)
def transactions_pending(request):
    """List this user's outgoing transactions not yet accepted.

    Fixes: the original contained `raise PermissionDenied` twice in a row
    (making everything after it unreachable for all users, unlike the
    sibling views); the stray duplicate is removed. Also removed the dead
    `trans_list = []` initializer and collapsed the queryset intersection
    into one filter() call, matching transactions_to_be_accepted().
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    user1 = request.user
    trans_list = Transaction.objects.filter(
        transaction_accepted=False,
        transaction_user_1=user1,
    )
    d = {'trans_list': trans_list}
    return render(request, 'transactions_pending.html', context=d)
def transfer(request):
    """Initiate a money transfer.

    GET: render the transfer form with the user's allowed recipients.
    POST: run the validation chain, e-mail a one-time password to the
    sender, stash the pending transfer details in the session, and render
    the OTP entry page. The transfer is completed later by make_changes().

    Every rejection branch resets the user_ongoing_transaction flag before
    rendering the message page.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    if request.method == 'POST':
        # A transfer already in flight is treated as an anomaly: force a
        # logout instead of starting a second one.
        if (request.user.user_ongoing_transaction):
            # NOTE(review): only `import django` appears at the top of this
            # file — confirm django.contrib.auth is reachable via this
            # attribute path at runtime.
            django.contrib.auth.logout(request)
            return HttpResponseRedirect(reverse('logout'))
        request.user.user_ongoing_transaction = True
        # request.user.user_ongoing_transaction = False
        request.user.save()
        user2_username = request.POST.get("username", "null")
        # NOTE(review): raises CustomUser.DoesNotExist (HTTP 500) for an
        # unknown username — presumably the form only offers valid choices;
        # confirm.
        user2 = CustomUser.objects.get(username=user2_username)
        amount = 0
        try:
            amount = int(request.POST.get("amount", "null"))
        except:
            message = 'Please enter valid input.'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
        # Transfers to the admin account are blocked (admin is the
        # add-money counterparty, see add_money_work).
        if(user2.username=='admin'):
            message = 'You Cannot Send Money To Admin'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
            # return HttpResponse('''<h1>You Cannot Send Money To Admin<br><a href="wallet_home">GO BACK</a>''')
        user1 = request.user
        # print(request.user.user_last_transaction)
        # print((datetime.now() - timecheck).seconds)
        am = amount
        if (am <= 0):
            message = 'Positive value required'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
            # return HttpResponse('''<h1>Positive value required<br><a href="wallet_home">GO BACK</a>''')
        # user_type 5 = commercial user: may pay strangers; others may only
        # pay friends.
        if user1.user_type != 5 and not are_friend(user1, user2):
            return utils.raise_exception(request, "Become a commercial user to send money to strangers.")
        if (user1.username == user2.username):
            message = 'You cannot transfer money to yourself'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
            # return HttpResponse(
            #     "<h1>You cannot transfer money to yourself<br><a href='wallet_home'>GO BACK</a>")
        if user1.user_no_of_transactions + 1 > user1.user_no_of_transactions_allowed: # MAX LIMIT ----> CHANGE
            message = 'You have reached max. transaction limit'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
            # return HttpResponse(
            #     "<h1>You have reached max. transaction limit<br><a href='wallet_home'>GO BACK</a>")
        if (am > user1.user_balance):
            message = 'Insufficient Balance to transfer entered amount'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
            # return HttpResponse(
            #     "<h1>Insufficient Balance to transfer entered amount<br><a href='wallet_home'>GO BACK</a>")
        # Rate limit: at most one transfer initiation per 80 seconds,
        # tracked as a formatted-string timestamp on the user row.
        timecheck = datetime.strptime(user1.user_last_transaction_for_begin, "%d-%b-%Y (%H:%M:%S.%f)")
        if ((datetime.now() - timecheck).seconds < 80):
            message = 'Try after 80 seconds'
            d = {}
            d['message'] = message
            request.user.user_ongoing_transaction = False
            request.user.save()
            return render(request, 'display_message_1.html', context=d)
            # return HttpResponse("<h1>Try after 80 seconds<br><a href='wallet_home'>GO BACK</a>")
        # user1.user_last_transaction_for_begin = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
        # user1.save()
        # totp = pyotp.TOTP('base32secret3232')
        # E-mail a one-time password; make_changes() later compares it with
        # the copy stored in the session below.
        curr_otp = getOTP()
        # request.session['date_time'] = str(datetime.datet)
        # print(curr_otp)
        # print(curr_otp)
        send_mail('SocPay | NoReply', 'Your OTP is : ' + str(curr_otp), 'accounts@socpay.in', [user1.email], fail_silently=False)
        user1.user_last_transaction_for_begin = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
        user1.save()
        # Pending-transfer state consumed by make_changes().
        request.session['user1'] = user1.username
        request.session['user2'] = user2.username
        request.session['am'] = str(am)
        request.session['curr_otp'] = str(curr_otp)
        request.session['time'] = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
        return render(request, 'otp_tranfer.html')
        # return HttpResponseRedirect('/thanks/')
    else:
        all_friends = get_friends(request.user)
        if (request.user.user_ongoing_transaction):
            django.contrib.auth.logout(request)
            return HttpResponseRedirect(reverse('logout'))
        # Commercial users (type 5) may pay any user except admin and
        # themselves.
        if request.user.user_type == 5:
            all_friends = CustomUser.objects.filter(~Q(username="admin")) & CustomUser.objects.filter(~Q(username=request.user.username))
        context = {'all_friends':all_friends}
        return render(request, 'transfer_money.html', context=context)
    # form = transaction_form(request.user)
    # print(form)
    # u2 = 0
    # am = 0
    #
    # form =
    # try:
    #     u2 = str(request.GET.get('to'))
    #     am = int(request.GET.get('amount'))
    # except:
    #     return HttpResponse("<h1>Please enter valid values<br><a href='http://google.com'>GO BACK</a>")
def make_changes(request):
    """Complete a transfer started by transfer(): verify the OTP posted by
    the user against the copy stored in the session, then record the
    Transaction.

    The sender is debited immediately, but the recipient is NOT credited
    here — the Transaction row is created with transaction_accepted=False;
    presumably the credit happens when the recipient accepts (confirm in
    the acceptance view; note user2.save() below persists no change made
    in this function).
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    # print(request.session['user1'], request.session['user2'], request.session['am'], request.session['curr_otp'])
    # OTP is only valid for 60 seconds after it was issued.
    timenow = datetime.now()
    timethen = datetime.strptime(request.session['time'],"%d-%b-%Y (%H:%M:%S.%f)")
    if((timenow - timethen).seconds > 60):
        # NOTE(review): unlike the branches in transfer(), these error
        # returns do not reset user_ongoing_transaction — confirm whether
        # the user is meant to stay locked out until logout.
        message = 'OTP Timeout'
        d = {}
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse("<h1>OTP Timeout<br><a href='wallet_home'>GO BACK</a>")
    user1 = CustomUser.objects.get(username=request.session['user1'])
    # Rate limit: one OTP confirmation per 80 seconds per sender.
    timecheck = datetime.strptime(user1.user_last_transaction_for_otp, "%d-%b-%Y (%H:%M:%S.%f)")
    if ((datetime.now() - timecheck).seconds < 80):
        message = 'Please try after 80 seconds.'
        d = {}
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse("<h1>Please try after 80 seconds.<br><a href='wallet_home'>GO BACK</a>")
    # timecheck = datetime.strptime(user1.user_last_transaction,"%d-%b-%Y (%H:%M:%S.%f)")
    # if((datetime.now() - timecheck).seconds < 76):
    #     return HttpResponse("<h1>Something Went Wrong<br><a href='http://google.com'>GO BACK</a>")
    user2 = CustomUser.objects.get(username=request.session['user2'])
    am = int(request.session['am'])
    curr_otp = request.session['curr_otp']
    otp1 = str(request.POST.get('otp'))
    # print(otp1,curr_otp)
    # Reject non-numeric OTP input before the integer comparison below.
    try:
        y = int(otp1)
    except:
        message = 'OTP invalid'
        d = {}
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse("<h1>OTP invalid<br><a href='wallet_home'>GO BACK</a>")
    if (int(otp1) != int(curr_otp)):
        # print(otp1, curr_otp)
        message = 'OTP does not match'
        d = {}
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse("<h1>OTP does not match<br><a href='wallet_home'>GO BACK</a>")
    # user1 = 0
    # user2 = 0
    # Debit the sender and log the (not-yet-accepted) transaction.
    user1.user_balance -= am;
    # user2.user_balance += am;
    user1.user_no_of_transactions += 1;
    dt = datetime.now()
    Transaction.objects.create(transaction_user_1=user1, transaction_user_2=user2, transaction_amount=am,
                               transaction_date=dt, transaction_time=dt, transaction_accepted=False)
    # tempS = "from : "+str(user1.username)+" "+"to : "+str(user2.username)+" "+"amount : "+str(am)+" "+"date & time : "+str(dt)
    # user1.user_transactions_list+=tempS+'\n'
    # user2.user_transactions_list+=tempS+'\n'
    user1.user_last_transaction_for_otp = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
    user1.user_ongoing_transaction = False
    user1.save()
    user2.save()
    message = 'Money Requested Successfully'
    d = {}
    d['message'] = message
    return render(request, 'display_message_1.html', context=d)
    # return HttpResponse("<h1>Money Requested Successfully<br><a href='wallet_home'>GO BACK</a>")
def add_money(request):
    """Render the add-money form for an authenticated user."""
    if request.user.is_authenticated:
        return render(request, 'add_money.html')
    raise PermissionDenied
def add_money_work(request):
    """Start an add-money (top-up) flow: validate the amount, e-mail an
    OTP, stash the pending details in the session (keyed with an `_add`
    suffix, counterparty fixed to 'admin'), and render the OTP page.

    The balance is credited later, after OTP confirmation, in
    add_money_after_otp(). Every rejection branch resets the
    user_ongoing_transaction flag before rendering the message page.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    # An operation already in flight forces a logout, mirroring transfer().
    if (request.user.user_ongoing_transaction):
        # NOTE(review): only `import django` appears at the top of this
        # file — confirm django.contrib.auth is reachable via this path.
        django.contrib.auth.logout(request)
        return HttpResponseRedirect(reverse('logout'))
    request.user.user_ongoing_transaction = True
    # request.user.user_ongoing_transaction = False
    request.user.save()
    user1 = request.user
    amount = 0
    # Accept decimal input but truncate to a whole number of units.
    try:
        amount = float(request.POST.get('amount'))
        amount = int(amount)
    except:
        message = 'Enter Valid Value'
        request.user.user_ongoing_transaction = False
        request.user.save()
        d = {}
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse('''<h1>Value >=1 Required<br><a href="wallet_home">GO BACK</a>''')
    if (amount <= 0):
        message = 'Value >1 Required'
        request.user.user_ongoing_transaction = False
        request.user.save()
        d = {}
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse('''<h1>Value >1 Required<br><a href="wallet_home">GO BACK</a>''')
    if user1.user_no_of_transactions + 1 > user1.user_no_of_transactions_allowed: # MAX LIMIT ----> CHANGE
        message = 'You have reached max. transaction limit'
        d = {}
        request.user.user_ongoing_transaction = False
        request.user.save()
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse(
        #     "<h1>You have reached max. transaction limit<br><a href='wallet_home'>GO BACK</a>")
    # Rate limit: at most one initiation per 80 seconds, tracked as a
    # formatted-string timestamp on the user row.
    timecheck = datetime.strptime(user1.user_last_transaction_for_begin, "%d-%b-%Y (%H:%M:%S.%f)")
    if ((datetime.now() - timecheck).seconds < 80):
        message = 'Please try after 80 seconds'
        d = {}
        request.user.user_ongoing_transaction = False
        request.user.save()
        d['message'] = message
        return render(request, 'display_message_1.html', context=d)
        # return HttpResponse("<h1>Please try after 80 seconds<br><a href='wallet_home'>GO BACK</a>")
    # user1 = request.user
    # user1.user_balance += amount
    # user1.save()
    # totp = pyotp.TOTP('base32secret3232')
    # E-mail a one-time password; add_money_after_otp() compares it with
    # the session copy stored below.
    curr_otp = getOTP()
    # request.session['date_time'] = str(datetime.datet)
    # print(curr_otp)
    # print(curr_otp)
    send_mail('SocPay | NoReply', 'Your OTP is : ' + str(curr_otp), 'accounts@socpay.in', [user1.email],
              fail_silently=False)
    user1.user_last_transaction_for_begin = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
    user1.save()
    # Pending top-up state consumed by add_money_after_otp().
    request.session['user1_add'] = user1.username
    request.session['user2_add'] = 'admin'
    request.session['am_add'] = str(amount)
    request.session['curr_otp_add'] = str(curr_otp)
    request.session['time_add'] = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
    return render(request, 'otp_add_money.html')
    # return HttpResponse("<h1>Money Transeferred Successfully<br><a href='wallet_home'>GO BACK</a>")
def _fail_add_money(request, message):
    """Clear the current user's in-flight-transaction flag and render *message*."""
    request.user.user_ongoing_transaction = False
    request.user.save()
    return render(request, 'display_message_1.html', context={'message': message})


def add_money_after_otp(request):
    """Validate the add-money OTP stored in the session and, on success,
    record a pending (admin-approved) add-money Transaction.

    Failure paths (OTP timeout, per-user rate limit, malformed OTP, wrong
    OTP) all clear the user's ongoing-transaction flag and render
    display_message_1.html with an explanatory message.

    Raises:
        PermissionDenied: if the requester is not authenticated.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    # OTP expires 60 seconds after it was issued.  total_seconds() is used
    # instead of .seconds, which wraps at one day and would let a stale OTP
    # issued >24h ago pass the check.
    issued_at = datetime.strptime(request.session['time_add'], "%d-%b-%Y (%H:%M:%S.%f)")
    if (datetime.now() - issued_at).total_seconds() > 60:
        return _fail_add_money(request, 'OTP Timeout')
    user1 = CustomUser.objects.get(username=request.session['user1_add'])
    # Rate limit: at most one OTP-confirmed transaction every 80 seconds.
    last_otp_at = datetime.strptime(user1.user_last_transaction_for_otp,
                                    "%d-%b-%Y (%H:%M:%S.%f)")
    if (datetime.now() - last_otp_at).total_seconds() < 80:
        return _fail_add_money(request, 'Please try after 80 seconds.')
    user2 = CustomUser.objects.get(username=request.session['user2_add'])
    amount = int(request.session['am_add'])
    expected_otp = request.session['curr_otp_add']
    submitted_otp = str(request.POST.get('otp'))
    try:
        submitted = int(submitted_otp)
    except (TypeError, ValueError):
        # POST value missing or not numeric.
        return _fail_add_money(request, 'OTP Invalid')
    if submitted != int(expected_otp):
        return _fail_add_money(request, 'OTP does not match')
    # Money is NOT credited here: the transaction is created unaccepted and
    # the admin credits the balance later in transaction_accept.
    now = datetime.now()
    Transaction.objects.create(transaction_user_1=user1, transaction_user_2=user2,
                               transaction_amount=amount, transaction_date=now,
                               transaction_time=now, transaction_accepted=False,
                               transaction_money_add=True)
    user1.user_last_transaction_for_otp = now.strftime("%d-%b-%Y (%H:%M:%S.%f)")
    user1.user_ongoing_transaction = False
    user1.save()
    user2.save()
    return _fail_add_money(request, 'Money Will be Added Shortly')
# return HttpResponse("<h1>Money Will be Added Shortly<br><a href='wallet_home'>GO BACK</a>")
def transaction_accept(request):
    """Mark a pending transaction as accepted and credit the balance.

    Admin accepting an add-money request credits the requesting user
    (transaction_user_1); a regular user accepting a transfer credits the
    receiver (transaction_user_2).  Always redirects back to the
    pending-transactions page.

    Raises:
        PermissionDenied: if the requester is not authenticated.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    try:
        # Renamed from `id` to avoid shadowing the builtin.
        txn_id = int(request.POST.get('transaction_id'))
    except (TypeError, ValueError):
        request.user.user_ongoing_transaction = False
        request.user.save()
        return render(request, 'display_message_1.html', context={'message': '404 not found'})
    # NOTE(review): Transaction.objects.get raises DoesNotExist for an
    # unknown id and is left unhandled, as in the original — confirm a
    # 500 is acceptable here.
    transaction_now = Transaction.objects.get(pk=txn_id)
    transaction_now.transaction_accepted = True
    transaction_now.save()
    sender = CustomUser.objects.get(username=transaction_now.transaction_user_1.username)
    if request.user.username == 'admin':
        # Admin approves an add-money request: credit the requesting user.
        sender.user_balance += transaction_now.transaction_amount
        sender.save()
    else:
        # Receiver accepts a transfer: credit the receiver.
        receiver = CustomUser.objects.get(username=transaction_now.transaction_user_2.username)
        receiver.user_balance += transaction_now.transaction_amount
        sender.save()
        receiver.save()
    return HttpResponseRedirect('transactions_to_be_accepted')
def transaction_decline(request):
    """Decline (delete) a pending transaction.

    Admin declining an add-money request simply deletes the record (no
    balance was debited); a regular user declining a transfer refunds the
    sender and rolls back the sender's transaction count before deleting.

    Raises:
        PermissionDenied: if the requester is not authenticated.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    try:
        # Renamed from `id` to avoid shadowing the builtin.
        txn_id = int(request.POST.get('transaction_id'))
    except (TypeError, ValueError):
        request.user.user_ongoing_transaction = False
        request.user.save()
        return render(request, 'display_message_1.html', context={'message': '404 not found'})
    # Consistent pk= lookup (original mixed pk= and id=); the redundant
    # transaction_accepted = False writes before delete() were dropped.
    transaction_now = Transaction.objects.get(pk=txn_id)
    if request.user.username == 'admin':
        # Declined top-up: nothing was debited, just drop the record.
        transaction_now.delete()
        return HttpResponseRedirect('transactions_to_be_accepted')
    # Declined transfer: refund the sender and undo the count increment.
    sender = CustomUser.objects.get(username=transaction_now.transaction_user_1.username)
    receiver = CustomUser.objects.get(username=transaction_now.transaction_user_2.username)
    sender.user_balance += transaction_now.transaction_amount
    sender.user_no_of_transactions -= 1
    transaction_now.delete()
    sender.save()
    receiver.save()
    return HttpResponseRedirect('transactions_to_be_accepted')
def transfer_money(request):
    """Render the transfer-money form, listing every user as a possible payee."""
    if not request.user.is_authenticated:
        raise PermissionDenied
    # TODO fix database query (fetches every user unconditionally)
    context = {'all_users': CustomUser.objects.all()}
    return render(request, 'transfer_money.html', context=context)
| 37.130973
| 137
| 0.646313
| 2,564
| 20,979
| 5.133385
| 0.079953
| 0.059337
| 0.046194
| 0.041787
| 0.841589
| 0.805349
| 0.772223
| 0.759687
| 0.746923
| 0.728461
| 0
| 0.014503
| 0.221078
| 20,979
| 564
| 138
| 37.196809
| 0.790955
| 0.206111
| 0
| 0.711111
| 0
| 0
| 0.133104
| 0.041531
| 0
| 0
| 0
| 0.001773
| 0
| 1
| 0.033333
| false
| 0
| 0.038889
| 0
| 0.183333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2176aef736cf2571fd527ee2b148141cfc959151
| 640
|
py
|
Python
|
test/model/pretrained/test_tensorfloworg.py
|
fferflo/tfcv
|
b549f733f3f04395e0ed0d4527e30b83fa2d8ad9
|
[
"MIT"
] | null | null | null |
test/model/pretrained/test_tensorfloworg.py
|
fferflo/tfcv
|
b549f733f3f04395e0ed0d4527e30b83fa2d8ad9
|
[
"MIT"
] | null | null | null |
test/model/pretrained/test_tensorfloworg.py
|
fferflo/tfcv
|
b549f733f3f04395e0ed0d4527e30b83fa2d8ad9
|
[
"MIT"
] | null | null | null |
import tfcv
def test_resnet_v1b_50_imagenet():
    """Smoke test: creating the pretrained ResNet-v1b-50 ImageNet model,
    with and without dilation, must not raise."""
    tfcv.model.pretrained.tensorfloworg.resnet_v1b_50_imagenet.create(dilate=False)
    tfcv.model.pretrained.tensorfloworg.resnet_v1b_50_imagenet.create(dilate=True)
def test_resnet_v1b_101_imagenet():
    """Smoke test: creating the pretrained ResNet-v1b-101 ImageNet model,
    with and without dilation, must not raise."""
    tfcv.model.pretrained.tensorfloworg.resnet_v1b_101_imagenet.create(dilate=False)
    tfcv.model.pretrained.tensorfloworg.resnet_v1b_101_imagenet.create(dilate=True)
def test_resnet_v1b_152_imagenet():
    """Smoke test: creating the pretrained ResNet-v1b-152 ImageNet model,
    with and without dilation, must not raise."""
    tfcv.model.pretrained.tensorfloworg.resnet_v1b_152_imagenet.create(dilate=False)
    tfcv.model.pretrained.tensorfloworg.resnet_v1b_152_imagenet.create(dilate=True)
| 45.714286
| 85
| 0.826563
| 86
| 640
| 5.802326
| 0.197674
| 0.162325
| 0.228457
| 0.38477
| 0.923848
| 0.923848
| 0.923848
| 0.875752
| 0.795591
| 0.795591
| 0
| 0.056314
| 0.084375
| 640
| 13
| 86
| 49.230769
| 0.795222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| true
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
21c92e5cc387ca39b7ad9f5cb507778b1aa009d2
| 36,307
|
py
|
Python
|
backend/tracim_backend/tests/library/test_user_api.py
|
lezardrouge/tracim
|
713ff6066767554333e7e0b1de608ec1a7e4229c
|
[
"MIT"
] | null | null | null |
backend/tracim_backend/tests/library/test_user_api.py
|
lezardrouge/tracim
|
713ff6066767554333e7e0b1de608ec1a7e4229c
|
[
"MIT"
] | null | null | null |
backend/tracim_backend/tests/library/test_user_api.py
|
lezardrouge/tracim
|
713ff6066767554333e7e0b1de608ec1a7e4229c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
import transaction
from tracim_backend.exceptions import AuthenticationFailed
from tracim_backend.exceptions import EmailValidationFailed
from tracim_backend.exceptions import ExternalAuthUserEmailModificationDisallowed
from tracim_backend.exceptions import ExternalAuthUserPasswordModificationDisallowed
from tracim_backend.exceptions import MissingLDAPConnector
from tracim_backend.exceptions import TooShortAutocompleteString
from tracim_backend.exceptions import TracimValidationFailed
from tracim_backend.exceptions import UserAuthTypeDisabled
from tracim_backend.exceptions import UserDoesNotExist
from tracim_backend.lib.core.group import GroupApi
from tracim_backend.lib.core.user import UserApi
from tracim_backend.lib.core.userworkspace import RoleApi
from tracim_backend.lib.core.workspace import WorkspaceApi
from tracim_backend.models.auth import AuthType
from tracim_backend.models.auth import User
from tracim_backend.models.context_models import UserInContext
from tracim_backend.models.data import UserRoleInWorkspace
from tracim_backend.tests import DefaultTest
from tracim_backend.tests import eq_
class TestUserApi(DefaultTest):
    """Unit tests for tracim_backend.lib.core.user.UserApi.

    Covers: user creation and field-length validation (email, password,
    public name, lang, timezone), lookup by id/email, the "known users"
    autocomplete (workspace-scoped visibility, exclusion filters),
    UserInContext wrapping, internal-auth authentication, and account
    disabling.  Uses the session/config fixtures provided by DefaultTest.
    """

    def test_unit__create_minimal_user__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        assert u.email == "bob@bob"
        assert u.display_name == "bob"

    @pytest.mark.internal_auth
    def test_unit__create_minimal_user_and_update__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        api.update(u, "bob", "bob@bob", "password", do_save=True)
        nu = api.get_one_by_email("bob@bob")
        assert nu is not None
        assert nu.email == "bob@bob"
        assert nu.display_name == "bob"
        assert nu.validate_password("password")

    def test_unit__create_minimal_user__err__too_short_email(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(TracimValidationFailed):
            api.create_minimal_user("b@")

    def test_unit__create_minimal_user__err__too_long_email(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(TracimValidationFailed):
            email = "b{}b@bob".format("o" * 255)
            api.create_minimal_user(email)

    # email
    def test_unit__update_user_email__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        assert u.email == "bob@bob"
        u = api.update(user=u, email="bib@bib")
        assert u.email == "bib@bib"

    def test_unit__update_user_email__err__wrong_format(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        # 2 char
        with pytest.raises(EmailValidationFailed):
            api.update(user=u, email="b+b")

    def test_unit__update_user_email__err__too_short_email(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        # 2 char
        with pytest.raises(TracimValidationFailed):
            u = api.update(user=u, email="b@")
        # 3 char
        u = api.update(user=u, email="b@b")
        assert u.email == "b@b"

    def test_unit__update_user_email__err__too_long_email(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        # 256 char
        chars = "o" * (256 - 6)
        with pytest.raises(TracimValidationFailed):
            email = "b{}b@bob".format(chars)
            u = api.update(user=u, email=email)
        # 255 char
        chars = "o" * (255 - 6)
        email = "b{}b@bob".format(chars)
        u = api.update(user=u, email=email)
        assert u.email == email

    # password
    def test_unit__update_user_password__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        assert u.password is None
        # 8 char
        u = api.update(user=u, password="password")
        assert u.password
        assert u.validate_password("password")
        # 16 char
        u = api.update(user=u, password="password" * 2)
        assert u.password
        assert u.validate_password("password" * 2)

    def test_unit__update_user_password__err__too_short_password(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        # 5 char
        with pytest.raises(TracimValidationFailed):
            api.update(user=u, password="passw")
        # 6 char
        api.update(user=u, password="passwo")

    def test_unit__update_user_password__err__too_long_password(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        with pytest.raises(TracimValidationFailed):
            password = "p" * 513
            u = api.update(user=u, password=password)
        password = "p" * 512
        api.update(user=u, password=password)

    # public_name
    def test_unit__update_user_public_name__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        assert u.display_name == "bob"
        # 8 char
        u = api.update(user=u, name="John Doe")
        assert u.display_name == "John Doe"
        # 16 char
        u = api.update(user=u, name="John Doe" * 2)
        assert u.display_name == "John Doe" * 2

    def test_unit__update_user_public_name__err__too_short_public_name(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        # 2 char
        with pytest.raises(TracimValidationFailed):
            u = api.update(user=u, name="nn")
        # 3 char
        u = api.update(user=u, name="nnn")
        assert u.display_name == "nnn"

    def test_unit__update_user_public_name__err__too_long_password(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        with pytest.raises(TracimValidationFailed):
            name = "n" * 256
            u = api.update(user=u, name=name)
        name = "n" * 255
        api.update(user=u, name=name)

    # lang
    def test_unit__update_user_lang_name__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        assert u.lang is None
        # 2 char
        u = api.update(user=u, lang="fr")
        assert u.lang == "fr"
        # 3 char
        u = api.update(user=u, lang="fre")
        assert u.lang == "fre"

    def test_unit__update_user_lang__err__too_short_lang(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        # 1 char
        with pytest.raises(TracimValidationFailed):
            u = api.update(user=u, lang="f")
        # 2 char
        u = api.update(user=u, lang="fr")
        assert u.lang == "fr"

    def test_unit__update_user_lang__err__too_long_lang(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        with pytest.raises(TracimValidationFailed):
            lang = "n" * 4
            u = api.update(user=u, lang=lang)
        lang = "n" * 3
        api.update(user=u, lang=lang)

    # timezone
    def test_unit__update_timezone__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        assert u.timezone is None
        u = api.update(user=u, timezone="Europe/Paris")
        assert u.timezone == "Europe/Paris"

    def test_unit__update_timezone__too_long_timezone(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        with pytest.raises(TracimValidationFailed):
            timezone = "t" * 33
            u = api.update(user=u, timezone=timezone)
        timezone = "t" * 32
        api.update(user=u, timezone=timezone)

    @pytest.mark.ldap
    def test_unit__create_minimal_user_and_update__err__set_unaivalable_auth_type(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bob@bob")
        with pytest.raises(UserAuthTypeDisabled):
            api.update(u, name="bob", email="bob@bob", auth_type=AuthType.LDAP, do_save=True)

    @pytest.mark.internal_auth
    def test_unit__create_minimal_user_and_set_password__ok__nominal_case(self):
        u = User()
        u.email = "bob@bob"
        u.password = "pass"
        u.auth_type = AuthType.INTERNAL
        u.display_name = "bob"
        api = UserApi(current_user=u, session=self.session, config=self.app_config)
        assert u.validate_password("pass")
        api.set_password(u, "pass", "newpass", "newpass")
        assert u is not None
        assert u.email == "bob@bob"
        assert u.display_name == "bob"
        assert u.validate_password("newpass")
        assert not u.validate_password("pass")

    @pytest.mark.internal_auth
    def test_unit__create_minimal_user_and_set_email__ok__nominal_case(self):
        u = User()
        u.email = "bob@bob"
        u.password = "pass"
        u.auth_type = AuthType.INTERNAL
        u.display_name = "bob"
        api = UserApi(current_user=u, session=self.session, config=self.app_config)
        assert u.email == "bob@bob"
        api.set_email(u, "pass", "newbobemail@bob")
        assert u is not None
        assert u.email == "newbobemail@bob"

    @pytest.mark.internal_auth
    def test__unit__create__user__ok_nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_user(
            email="bob@bob",
            password="password",
            name="bob",
            timezone="+2",
            lang="en",
            do_save=True,
            do_notify=False,
        )
        assert u is not None
        assert u.email == "bob@bob"
        assert u.validate_password("password")
        assert u.display_name == "bob"
        assert u.timezone == "+2"
        assert u.lang == "en"

    def test_unit__user_with_email_exists__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bibi@bibi")
        api.update(u, "bibi", "bibi@bibi", "password", do_save=True)
        transaction.commit()
        eq_(True, api.user_with_email_exists("bibi@bibi"))
        eq_(False, api.user_with_email_exists("unknown"))

    def test_get_one_by_email(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("bibi@bibi")
        self.session.flush()
        api.update(u, "bibi", "bibi@bibi", "password", do_save=True)
        uid = u.user_id
        transaction.commit()
        eq_(uid, api.get_one_by_email("bibi@bibi").user_id)

    def test_unit__get_one_by_email__err__user_does_not_exist(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(UserDoesNotExist):
            api.get_one_by_email("unknown")

    def test_unit__get_all__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        api.create_minimal_user("bibi@bibi")
        users = api.get_all()
        # u1 + Admin user from BaseFixture
        assert 2 == len(users)

    def test_unit__get_known__user__admin__too_short_acp_str(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        with pytest.raises(TooShortAutocompleteString):
            api.get_known_user("e")

    def test_unit__get_known__user__admin__by_email(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        users = api.get_known_user("email")
        assert len(users) == 1
        assert users[0] == u1

    def test_unit__get_known__user__user__no_workspace_empty_known_user(self):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=admin, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        api2 = UserApi(current_user=u1, session=self.session, config=self.app_config)
        users = api2.get_known_user("email")
        assert len(users) == 0

    def test_unit__get_known__user__same_workspaces_users_by_name(self):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        u2 = api.create_user(email="email2@email2", name="name2", do_notify=False, do_save=True)
        u3 = api.create_user(
            email="notfound@notfound", name="notfound", do_notify=False, do_save=True
        )
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace = wapi.create_workspace("test workspace n°1", save_now=True)
        role_api = RoleApi(current_user=admin, session=self.session, config=self.app_config)
        role_api.create_one(u1, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u2, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace, UserRoleInWorkspace.READER, False)
        api2 = UserApi(current_user=u1, session=self.session, config=self.app_config)
        users = api2.get_known_user("name")
        assert len(users) == 2
        assert users[0] == u1
        assert users[1] == u2

    def test_unit__get_known__user__distinct_workspaces_users_by_name__exclude_workspace(self):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        u2 = api.create_user(email="email2@email2", name="name2", do_notify=False, do_save=True)
        u3 = api.create_user(
            email="notfound@notfound", name="notfound", do_notify=False, do_save=True
        )
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace = wapi.create_workspace("test workspace n°1", save_now=True)
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace_2 = wapi.create_workspace("test workspace n°2", save_now=True)
        role_api = RoleApi(current_user=admin, session=self.session, config=self.app_config)
        role_api.create_one(u1, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u2, workspace_2, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace_2, UserRoleInWorkspace.READER, False)
        api2 = UserApi(current_user=u3, session=self.session, config=self.app_config)
        users = api2.get_known_user("name", exclude_workspace_ids=[workspace.workspace_id])
        assert len(users) == 1
        assert users[0] == u2

    def test_unit__get_known__user__distinct_workspaces_users_by_name__exclude_workspace_and_name(
        self
    ):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        u2 = api.create_user(email="email2@email2", name="name2", do_notify=False, do_save=True)
        u3 = api.create_user(
            email="notfound@notfound", name="notfound", do_notify=False, do_save=True
        )
        u4 = api.create_user(email="email3@email3", name="name3", do_notify=False, do_save=True)
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace = wapi.create_workspace("test workspace n°1", save_now=True)
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace_2 = wapi.create_workspace("test workspace n°2", save_now=True)
        role_api = RoleApi(current_user=admin, session=self.session, config=self.app_config)
        role_api.create_one(u1, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u2, workspace_2, UserRoleInWorkspace.READER, False)
        role_api.create_one(u4, workspace_2, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace_2, UserRoleInWorkspace.READER, False)
        api2 = UserApi(current_user=u3, session=self.session, config=self.app_config)
        users = api2.get_known_user(
            "name", exclude_workspace_ids=[workspace.workspace_id], exclude_user_ids=[u4.user_id]
        )
        assert len(users) == 1
        assert users[0] == u2

    def test_unit__get_known__user__distinct_workspaces_users_by_name(self):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        u2 = api.create_user(email="email2@email2", name="name2", do_notify=False, do_save=True)
        u3 = api.create_user(
            email="notfound@notfound", name="notfound", do_notify=False, do_save=True
        )
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace = wapi.create_workspace("test workspace n°1", save_now=True)
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace_2 = wapi.create_workspace("test workspace n°2", save_now=True)
        role_api = RoleApi(current_user=admin, session=self.session, config=self.app_config)
        role_api.create_one(u1, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u2, workspace_2, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace_2, UserRoleInWorkspace.READER, False)
        api2 = UserApi(current_user=u3, session=self.session, config=self.app_config)
        users = api2.get_known_user("name")
        assert len(users) == 2
        assert users[0] == u1
        assert users[1] == u2

    def test_unit__get_known__user__same_workspaces_users_by_name__exclude_user(self):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        u2 = api.create_user(email="email2@email2", name="name2", do_notify=False, do_save=True)
        u3 = api.create_user(
            email="notfound@notfound", name="notfound", do_notify=False, do_save=True
        )
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace = wapi.create_workspace("test workspace n°1", save_now=True)
        role_api = RoleApi(current_user=admin, session=self.session, config=self.app_config)
        role_api.create_one(u1, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u2, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace, UserRoleInWorkspace.READER, False)
        api2 = UserApi(current_user=u1, session=self.session, config=self.app_config)
        users = api2.get_known_user("name", exclude_user_ids=[u1.user_id])
        assert len(users) == 1
        assert users[0] == u2

    def test_unit__get_known__user__same_workspaces_users_by_email(self):
        admin = self.session.query(User).filter(User.email == "admin@admin.admin").one()
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        u2 = api.create_user(email="email2@email2", name="name2", do_notify=False, do_save=True)
        u3 = api.create_user(
            email="notfound@notfound", name="notfound", do_notify=False, do_save=True
        )
        wapi = WorkspaceApi(current_user=admin, session=self.session, config=self.app_config)
        workspace = wapi.create_workspace("test workspace n°1", save_now=True)
        role_api = RoleApi(current_user=admin, session=self.session, config=self.app_config)
        role_api.create_one(u1, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u2, workspace, UserRoleInWorkspace.READER, False)
        role_api.create_one(u3, workspace, UserRoleInWorkspace.READER, False)
        api2 = UserApi(current_user=u1, session=self.session, config=self.app_config)
        users = api2.get_known_user("email")
        assert len(users) == 2
        assert users[0] == u1
        assert users[1] == u2

    def test_unit__get_known__user__admin__by_name(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u1 = api.create_user(email="email@email", name="name", do_notify=False, do_save=True)
        users = api.get_known_user("nam")
        assert len(users) == 1
        assert users[0] == u1

    def test_unit__get_one__ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = api.create_minimal_user("titi@titi")
        api.update(u, "titi", "titi@titi", "password", do_save=True)
        one = api.get_one(u.user_id)
        eq_(u.user_id, one.user_id)

    def test_unit__get_user_with_context__nominal_case(self):
        user = User(email="admin@tracim.tracim", display_name="Admin", is_active=True)
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        new_user = api.get_user_with_context(user)
        assert isinstance(new_user, UserInContext)
        assert new_user.user == user
        assert new_user.profile == "nobody"
        assert new_user.user_id == user.user_id
        assert new_user.email == "admin@tracim.tracim"
        assert new_user.display_name == "Admin"
        assert new_user.is_active is True
        # TODO - G.M - 03-05-2018 - [avatar][agenda] Should test this
        # with true value when those param will be available.
        assert new_user.avatar_url is None

    def test_unit__get_current_user_ok__nominal_case(self):
        user = User(email="admin@tracim.tracim")
        api = UserApi(current_user=user, session=self.session, config=self.app_config)
        new_user = api.get_current_user()
        assert isinstance(new_user, User)
        assert user == new_user

    def test_unit__get_current_user__err__user_not_exist(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(UserDoesNotExist):
            api.get_current_user()

    @pytest.mark.internal_auth
    def test_unit__authenticate_user___ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        user = api.authenticate("admin@admin.admin", "admin@admin.admin")
        assert isinstance(user, User)
        assert user.email == "admin@admin.admin"
        assert user.auth_type == AuthType.INTERNAL

    @pytest.mark.internal_auth
    def test_unit__authenticate_user___err__user_not_active(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        gapi = GroupApi(current_user=None, session=self.session, config=self.app_config)
        groups = [gapi.get_one_with_name("users")]
        user = api.create_user(
            email="test@test.test",
            password="password",
            name="bob",
            groups=groups,
            timezone="Europe/Paris",
            do_save=True,
            do_notify=False,
        )
        api.disable(user)
        with pytest.raises(AuthenticationFailed):
            api.authenticate("test@test.test", "test@test.test")

    @pytest.mark.internal_auth
    def test_unit__authenticate_user___err__wrong_password(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(AuthenticationFailed):
            api.authenticate("admin@admin.admin", "wrong_password")

    @pytest.mark.internal_auth
    def test_unit__authenticate_user___err__wrong_user(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(AuthenticationFailed):
            api.authenticate("admin@admin.admin", "wrong_password")

    def test_unit__disable_user___ok__nominal_case(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        gapi = GroupApi(current_user=None, session=self.session, config=self.app_config)
        groups = [gapi.get_one_with_name("users")]
        user = api.create_user(
            email="test@test.test",
            password="password",
            name="bob",
            groups=groups,
            timezone="Europe/Paris",
            do_save=True,
            do_notify=False,
        )
        user2 = api.create_user(
            email="test2@test.test",
            password="password",
            name="bob2",
            groups=groups,
            timezone="Europe/Paris",
            do_save=True,
            do_notify=False,
        )
        api2 = UserApi(current_user=user, session=self.session, config=self.app_config)
        api2.disable(user2)
        updated_user2 = api.get_one(user2.user_id)
        assert updated_user2.is_active is False
        assert updated_user2.user_id == user2.user_id
        assert updated_user2.email == user2.email

    def test_unit__disable_user___err__user_cant_disable_itself(self):
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        gapi = GroupApi(current_user=None, session=self.session, config=self.app_config)
        groups = [gapi.get_one_with_name("users")]
        user = api.create_user(
            email="test@test.test",
            password="password",
            name="bob",
            groups=groups,
            timezone="Europe/Paris",
            do_save=True,
            do_notify=False,
        )
        api2 = UserApi(current_user=user, session=self.session, config=self.app_config)
        from tracim_backend.exceptions import UserCantDisableHimself

        with pytest.raises(UserCantDisableHimself):
            api2.disable(user)
class TestFakeLDAPUserApi(DefaultTest):
    """Unit tests for UserApi under the LDAP auth backend.

    Uses the "base_test_ldap" config section; LDAP itself is replaced by
    in-test fake connectors, so no LDAP server is required.
    """

    config_section = "base_test_ldap"

    def _create_ldap_user(self, api):
        """Common fixture: a password-less user delegated to LDAP auth."""
        return api.create_user(
            email="bob@bob",
            password=None,
            name="bob",
            auth_type=AuthType.LDAP,
            timezone="+2",
            lang="en",
            do_save=True,
            do_notify=False,
        )

    @pytest.mark.ldap
    def test_unit__authenticate_user___err__no_ldap_connector(self):
        """Authenticating without passing an LDAP connector must fail."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(MissingLDAPConnector):
            api.authenticate("hubert@planetexpress.com", "professor")

    @pytest.mark.xfail(reason="create account with specific profile ldap feature disabled")
    @pytest.mark.ldap
    def test_unit__authenticate_user___ok__new_user_ldap_auth_custom_profile(self):
        """LDAP auth auto-creates a user with the profile from the LDAP entry."""
        # TODO - G.M - 2018-12-05 - [ldap_profile]
        # support for profile attribute disabled
        # Should be reenabled later probably with a better code
        class fake_ldap_connector(object):
            def authenticate(self, email: str, password: str):
                # Bug fix: the original test read
                # "if not email == ... and password == ...", which parses as
                # "(not email == ...) and (password == ...)" and therefore
                # never rejected mismatched credentials as intended.
                if not (email == "hubert@planetexpress.com" and password == "professor"):
                    return None
                return [
                    None,
                    {
                        "mail": ["huber@planetepress.com"],
                        "givenName": ["Hubert"],
                        "profile": ["trusted-users"],
                    },
                ]

        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        user = api.authenticate("hubert@planetexpress.com", "professor", fake_ldap_connector())
        assert isinstance(user, User)
        assert user.email == "hubert@planetexpress.com"
        assert user.auth_type == AuthType.LDAP
        assert user.display_name == "Hubert"
        assert user.profile.name == "trusted-users"

    @pytest.mark.ldap
    def test_unit__authenticate_user___ok__new_user_ldap_auth(self):
        """LDAP auth auto-creates a user with the default "users" profile."""
        class fake_ldap_connector(object):
            def authenticate(self, email: str, password: str):
                # Bug fix: same operator-precedence correction as above —
                # reject unless both credentials match.
                if not (email == "hubert@planetexpress.com" and password == "professor"):
                    return None
                return [None, {"mail": ["huber@planetepress.com"], "givenName": ["Hubert"]}]

        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        user = api.authenticate("hubert@planetexpress.com", "professor", fake_ldap_connector())
        assert isinstance(user, User)
        assert user.email == "hubert@planetexpress.com"
        assert user.auth_type == AuthType.LDAP
        assert user.display_name == "Hubert"
        assert user.profile.name == "users"

    @pytest.mark.ldap
    def test__unit__create_user__err__external_auth_ldap_with_password(self):
        """Creating an LDAP user with a local password must be refused."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        with pytest.raises(ExternalAuthUserPasswordModificationDisallowed):
            api.create_user(
                email="bob@bob",
                password="password",
                name="bob",
                auth_type=AuthType.LDAP,
                timezone="+2",
                lang="en",
                do_save=True,
                do_notify=False,
            )

    @pytest.mark.ldap
    def test__unit__create__user__ok__external_auth_ldap(self):
        """A password-less LDAP user can be created; fields are persisted."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        assert u is not None
        assert u.email == "bob@bob"
        assert u.validate_password(None) is False
        assert u.display_name == "bob"
        assert u.timezone == "+2"
        assert u.lang == "en"

    @pytest.mark.ldap
    def test_unit_update__ok_external_auth_ldap(self):
        """Updating non-credential fields of an LDAP user is allowed."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        api.update(
            email="bob@bob",
            user=u,
            name="bobi",
            password=None,
            auth_type=AuthType.LDAP,
            timezone="-1",
            lang="fr",
            do_save=True,
        )
        assert u.display_name == "bobi"

    @pytest.mark.ldap
    def test_unit_update__err__external_auth_ldap_set_password(self):
        """Setting a password on an LDAP user via update() must be refused."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        with pytest.raises(ExternalAuthUserPasswordModificationDisallowed):
            api.update(
                email="bob@bob",
                user=u,
                name="bobi",
                password="new_password",
                auth_type=AuthType.LDAP,
                timezone="-1",
                lang="fr",
                do_save=True,
            )

    @pytest.mark.ldap
    def test_unit_update__err__external_auth_ldap_set_email(self):
        """Changing the email of an LDAP user via update() must be refused."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        with pytest.raises(ExternalAuthUserEmailModificationDisallowed):
            api.update(
                email="bob@bob1",
                user=u,
                name="bobi",
                password=None,
                auth_type=AuthType.LDAP,
                timezone="-1",
                lang="fr",
                do_save=True,
            )

    @pytest.mark.ldap
    def test_unit__check_email_modification_allowed__err_external_auth_ldap(self):
        """The email-modification guard rejects LDAP users directly."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        with pytest.raises(ExternalAuthUserEmailModificationDisallowed):
            api._check_email_modification_allowed(u)

    @pytest.mark.ldap
    def test_unit__check_password_modification_allowed__err_external_auth_ldap(self):
        """The password-modification guard rejects LDAP users directly."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        with pytest.raises(ExternalAuthUserPasswordModificationDisallowed):
            api._check_password_modification_allowed(u)

    @pytest.mark.ldap
    def test_unit_set_password__err__external_auth_ldap(self):
        """set_password() on an LDAP user must be refused."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        api._user = u
        with pytest.raises(ExternalAuthUserPasswordModificationDisallowed):
            api.set_password(u, "pass", "pass", "pass")

    @pytest.mark.ldap
    def test_unit_set_email__err__external_auth_ldap(self):
        """set_email() on an LDAP user must be refused."""
        api = UserApi(current_user=None, session=self.session, config=self.app_config)
        u = self._create_ldap_user(api)
        api._user = u
        with pytest.raises(ExternalAuthUserEmailModificationDisallowed):
            api.set_email(u, "pass", "bob@bobi")
| 44.878863
| 98
| 0.655521
| 4,620
| 36,307
| 4.878139
| 0.053896
| 0.04588
| 0.068687
| 0.091583
| 0.874429
| 0.817456
| 0.785065
| 0.768248
| 0.743355
| 0.723433
| 0
| 0.008783
| 0.234803
| 36,307
| 808
| 99
| 44.934406
| 0.802102
| 0.012835
| 0
| 0.639087
| 0
| 0
| 0.068303
| 0.00592
| 0
| 0
| 0
| 0.001238
| 0.134094
| 1
| 0.087019
| false
| 0.097004
| 0.031384
| 0
| 0.131241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
21d508b2bee707fa74b5f2ec1aec3e9f1be6021d
| 8,490
|
py
|
Python
|
qa327_test/backend/test_sell_ticket.py
|
EricFillion/CMPE-327
|
5e9f7c0b083643f7b6b9702775f69f67863b395e
|
[
"MIT"
] | null | null | null |
qa327_test/backend/test_sell_ticket.py
|
EricFillion/CMPE-327
|
5e9f7c0b083643f7b6b9702775f69f67863b395e
|
[
"MIT"
] | null | null | null |
qa327_test/backend/test_sell_ticket.py
|
EricFillion/CMPE-327
|
5e9f7c0b083643f7b6b9702775f69f67863b395e
|
[
"MIT"
] | null | null | null |
"""
Whitebox tests for the `sell_ticket` backend function.
"""
from datetime import date
from seleniumbase import BaseCase
from qa327.backend import sell_ticket
from qa327.models import db, User
from qa327_test.common import TEST_USER
class BackEndSellTicketTest(BaseCase):
    """
    Testing backend function `sell_ticket` using data interface coverage.

    Refactor: the identical per-test user setup is factored into
    `_build_user` / `_persisted_user`; each test keeps its original
    inputs and expected return value.
    """

    # Default expiry used by every case that needs a valid date.
    VALID_EXPIRY = date(2030, 1, 1)

    def _build_user(self):
        """Return a User populated from TEST_USER (not added to the DB)."""
        new_user = User()
        new_user.name = TEST_USER.name
        new_user.email = TEST_USER.email
        new_user.password = TEST_USER.password
        new_user.balance = TEST_USER.balance
        return new_user

    def _persisted_user(self):
        """Return a User populated from TEST_USER and committed to the DB."""
        new_user = self._build_user()
        db.session.add(new_user)
        db.session.commit()
        return new_user

    def test_sell_ticket_valid_no_fraction(self):
        """
        All inputs valid, price with no fractional part | user=<user in DB> name="Unique" quantity=1 price=10.00 expiryDate=date(2030, 1, 1) | No error
        """
        user = self._persisted_user()
        ret_value = sell_ticket(user, "Unique", 1, 10.00, self.VALID_EXPIRY)
        # sell_ticket returns False when there is no error.
        assert ret_value == False  # noqa: E712

    def test_sell_ticket_valid_with_fraction(self):
        """
        All inputs valid, price with fractional part | user=<user in DB> name="Unique" quantity=1 price=12.34 expiryDate=date(2030, 1, 1) | No error
        """
        user = self._persisted_user()
        ret_value = sell_ticket(user, "Unique", 1, 12.34, self.VALID_EXPIRY)
        assert ret_value == False  # noqa: E712

    def test_sell_ticket_user_not_in_db(self):
        """
        User object that doesn't exist in database | user=<user not in DB> name="Unique" quantity=1 price=10.00 expiryDate=date(2030, 1, 1) | Internal Error: user does not exist in database
        """
        # Deliberately skip adding the user to the DB.
        user = self._build_user()
        ret_value = sell_ticket(user, "Unique", 1, 10.00, self.VALID_EXPIRY)
        assert ret_value == "Internal Error: user does not exist in database"

    def test_sell_ticket_user_bad_type(self):
        """
        Non-User type user parameter | user=None name="Unique" quantity=1 price=10.00 expiryDate=date(2030, 1, 1) | Internal Error: 'user' must be of type 'User'
        """
        # DB is seeded as in the original test even though user=None is passed.
        self._persisted_user()
        ret_value = sell_ticket(None, "Unique", 1, 10.00, self.VALID_EXPIRY)
        assert ret_value == "Internal Error: 'user' must be of type 'User'"

    def test_sell_ticket_duplicate_name(self):
        """
        Duplicate name | user=<user in DB> name="Not Unique" quantity=1 price=10.00 expiryDate=date(2030, 1, 1) | Error: "A ticket with that name already exists."
        """
        # The most straightforward way to have a ticket with a duplicate name
        # is to just insert the same ticket into the DB twice.
        user = self._persisted_user()
        ret_value = sell_ticket(user, "Not Unique", 1, 10.00, self.VALID_EXPIRY)
        assert ret_value == False  # noqa: E712
        ret_value = sell_ticket(user, "Not Unique", 1, 10.00, self.VALID_EXPIRY)
        assert ret_value == "A ticket with that name already exists."

    def test_sell_ticket_name_bad_type(self):
        """
        Non-str type name parameter | user=<user in DB> name=None quantity=1 price=10.00 expiryDate=date(2030, 1, 1) | Internal Error: 'name' must be of type 'str'
        """
        user = self._persisted_user()
        ret_value = sell_ticket(user, None, 1, 10.00, self.VALID_EXPIRY)
        assert ret_value == "Internal Error: 'name' must be of type 'str'"

    def test_sell_ticket_quantity_bad_type(self):
        """
        Non-int type quantity parameter | user=<user in DB> name="Unique" quantity=None price=10.00 expiryDate=date(2030, 1, 1) | Internal Error: 'quantity' must be of type 'int'
        """
        user = self._persisted_user()
        ret_value = sell_ticket(user, "Unique", None, 10.00, self.VALID_EXPIRY)
        assert ret_value == "Internal Error: 'quantity' must be of type 'int'"

    def test_sell_ticket_price_bad_type(self):
        """
        Non-float type price parameter | user=<user in DB> name="Unique" quantity=1 price=None expiryDate=date(2030, 1, 1) | Internal Error: 'price' must be of type 'float'
        """
        user = self._persisted_user()
        ret_value = sell_ticket(user, "Unique", 1, None, self.VALID_EXPIRY)
        assert ret_value == "Internal Error: 'price' must be of type 'float'"

    def test_sell_ticket_expiryDate_bad_type(self):
        """
        Non-date type expiryDate parameter | user=<user in DB> name="Unique" quantity=1 price=10.00 expiryDate=None | Internal Error: 'expiryDate' must be of type 'date'
        """
        user = self._persisted_user()
        ret_value = sell_ticket(user, "Unique", 1, 10.00, None)
        assert ret_value == "Internal Error: 'expiryDate' must be of type 'date'"
| 37.400881
| 192
| 0.614252
| 1,117
| 8,490
| 4.5094
| 0.094897
| 0.086162
| 0.037125
| 0.050625
| 0.848918
| 0.838594
| 0.835815
| 0.804646
| 0.74866
| 0.742505
| 0
| 0.030962
| 0.296231
| 8,490
| 226
| 193
| 37.566372
| 0.81205
| 0.273027
| 0
| 0.794326
| 0
| 0
| 0.063199
| 0
| 0
| 0
| 0
| 0
| 0.070922
| 1
| 0.06383
| false
| 0.06383
| 0.035461
| 0
| 0.106383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
df3492712d1d7f4890746597c35bc752ec1b7cf3
| 132
|
py
|
Python
|
test/test_common.py
|
napulen/AugmentedNet
|
16aaeeccf15508478ac5987f9cf5d148ea44876e
|
[
"MIT"
] | 14
|
2021-09-03T05:15:09.000Z
|
2022-03-30T07:46:29.000Z
|
test/test_common.py
|
napulen/AugmentedNet
|
16aaeeccf15508478ac5987f9cf5d148ea44876e
|
[
"MIT"
] | 27
|
2021-11-10T15:29:47.000Z
|
2022-03-23T02:09:17.000Z
|
test/test_common.py
|
napulen/AugmentedNet
|
16aaeeccf15508478ac5987f9cf5d148ea44876e
|
[
"MIT"
] | null | null | null |
"""Tests for AugmentedNet.common."""
import unittest
import AugmentedNet.common
class TestEvaluate(unittest.TestCase):
    """Placeholder test case for AugmentedNet.common; no tests implemented yet."""

    pass
| 13.2
| 38
| 0.765152
| 14
| 132
| 7.214286
| 0.714286
| 0.356436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 132
| 9
| 39
| 14.666667
| 0.885965
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
df4e1e86b5434702d1effc0b25ee616b59a2feac
| 7,343
|
py
|
Python
|
tests/test_success_range_normalize.py
|
Bernardo-MG/wargame_analysis_jupyter_notebook
|
db13838ce0f8c6dcbc160259c1ee0ae258b51ba7
|
[
"MIT"
] | null | null | null |
tests/test_success_range_normalize.py
|
Bernardo-MG/wargame_analysis_jupyter_notebook
|
db13838ce0f8c6dcbc160259c1ee0ae258b51ba7
|
[
"MIT"
] | null | null | null |
tests/test_success_range_normalize.py
|
Bernardo-MG/wargame_analysis_jupyter_notebook
|
db13838ce0f8c6dcbc160259c1ee0ae258b51ba7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from decimal import Decimal
from scripts.probability import roll_success_range
"""
Max shots script tests.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
class Test1d6StartZero(unittest.TestCase):
    """
    Tests the success range with the range [0,5], which is the range of a six sides die.
    """

    def _normalized_range(self, goal, above, equal):
        # Shared call against the zero-based [0, 5] die with normalization on.
        return roll_success_range(0, 5, goal, above=above, equal=equal, normalize=True)

    def test_below_min_above_not_equal(self):
        self.assertEqual({"min": 0, "max": 5}, self._normalized_range(-1, True, False))

    def test_goal_2_above_not_equal(self):
        self.assertEqual({"min": 0, "max": 2}, self._normalized_range(2, True, False))

    def test_goal_2_above_equal(self):
        self.assertEqual({"min": 0, "max": 3}, self._normalized_range(2, True, True))

    def test_goal_2_below_not_equal(self):
        self.assertEqual({"min": 0, "max": 1}, self._normalized_range(2, False, False))

    def test_goal_2_below_equal(self):
        self.assertEqual({"min": 0, "max": 2}, self._normalized_range(2, False, True))
class Test1d6AboveNotEqual(unittest.TestCase):
    """
    Tests the normalized success range for rolling strictly above the goal
    (above=True, equal=False) with the range [1,6], which is the range of a six
    sides die.
    """

    def test_no_goal(self):
        chance = roll_success_range(1, 6, 0, above=True, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 6}, chance)

    def test_goal_above_max(self):
        # A goal past the die's maximum leaves no winning roll.
        chance = roll_success_range(1, 6, 10, above=True, equal=False, normalize=True)
        self.assertEqual(None, chance)

    def test_goal_1(self):
        chance = roll_success_range(1, 6, 1, above=True, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 5}, chance)

    def test_goal_2(self):
        chance = roll_success_range(1, 6, 2, above=True, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 4}, chance)

    def test_goal_3(self):
        chance = roll_success_range(1, 6, 3, above=True, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 3}, chance)

    def test_goal_4(self):
        chance = roll_success_range(1, 6, 4, above=True, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 2}, chance)

    def test_goal_5(self):
        chance = roll_success_range(1, 6, 5, above=True, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 1}, chance)

    def test_goal_6(self):
        # Rolling strictly above the maximum face is impossible.
        chance = roll_success_range(1, 6, 6, above=True, equal=False, normalize=True)
        self.assertEqual(None, chance)
class Test1d6AboveEqual(unittest.TestCase):
    """
    Tests the normalized success range for rolling above or equal to the goal
    (above=True, equal=True) with the range [1,6], which is the range of a six
    sides die.
    """

    def test_no_goal(self):
        chance = roll_success_range(1, 6, 0, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 6}, chance)

    def test_goal_above_max(self):
        # A goal past the die's maximum leaves no winning roll.
        chance = roll_success_range(1, 6, 10, above=True, equal=True, normalize=True)
        self.assertEqual(None, chance)

    def test_goal_1(self):
        chance = roll_success_range(1, 6, 1, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 6}, chance)

    def test_goal_2(self):
        chance = roll_success_range(1, 6, 2, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 5}, chance)

    def test_goal_3(self):
        chance = roll_success_range(1, 6, 3, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 4}, chance)

    def test_goal_4(self):
        chance = roll_success_range(1, 6, 4, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 3}, chance)

    def test_goal_5(self):
        chance = roll_success_range(1, 6, 5, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 2}, chance)

    def test_goal_6(self):
        chance = roll_success_range(1, 6, 6, above=True, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 1}, chance)
class Test1d6BelowNotEqual(unittest.TestCase):
    """
    Tests the normalized success range for rolling strictly below the goal
    (above=False, equal=False) with the range [1,6], which is the range of a
    six sides die.  (Docstring fixed: it previously said "above".)
    """

    def test_no_goal(self):
        # Nothing rolls strictly below 0 on a [1,6] die.
        chance = roll_success_range(1, 6, 0, above=False, equal=False, normalize=True)
        self.assertEqual(None, chance)

    def test_goal_above_max(self):
        chance = roll_success_range(1, 6, 10, above=False, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 6}, chance)

    def test_goal_1(self):
        # Nothing rolls strictly below the minimum face.
        chance = roll_success_range(1, 6, 1, above=False, equal=False, normalize=True)
        self.assertEqual(None, chance)

    def test_goal_2(self):
        chance = roll_success_range(1, 6, 2, above=False, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 1}, chance)

    def test_goal_3(self):
        chance = roll_success_range(1, 6, 3, above=False, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 2}, chance)

    def test_goal_4(self):
        chance = roll_success_range(1, 6, 4, above=False, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 3}, chance)

    def test_goal_5(self):
        chance = roll_success_range(1, 6, 5, above=False, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 4}, chance)

    def test_goal_6(self):
        chance = roll_success_range(1, 6, 6, above=False, equal=False, normalize=True)
        self.assertEqual({"min": 1, "max": 5}, chance)
class Test1d6BelowEqual(unittest.TestCase):
    """
    Tests the normalized success range for rolling below or equal to the goal
    (above=False, equal=True) with the range [1,6], which is the range of a
    six sides die.  (Docstring fixed: it previously said "above".)
    """

    def test_no_goal(self):
        # Nothing rolls at or below 0 on a [1,6] die.
        chance = roll_success_range(1, 6, 0, above=False, equal=True, normalize=True)
        self.assertEqual(None, chance)

    def test_goal_above_max(self):
        chance = roll_success_range(1, 6, 10, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 6}, chance)

    def test_goal_1(self):
        chance = roll_success_range(1, 6, 1, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 1}, chance)

    def test_goal_2(self):
        chance = roll_success_range(1, 6, 2, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 2}, chance)

    def test_goal_3(self):
        chance = roll_success_range(1, 6, 3, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 3}, chance)

    def test_goal_4(self):
        chance = roll_success_range(1, 6, 4, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 4}, chance)

    def test_goal_5(self):
        chance = roll_success_range(1, 6, 5, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 5}, chance)

    def test_goal_6(self):
        chance = roll_success_range(1, 6, 6, above=False, equal=True, normalize=True)
        self.assertEqual({"min": 1, "max": 6}, chance)
| 27.605263
| 93
| 0.639384
| 1,068
| 7,343
| 4.23221
| 0.059925
| 0.10354
| 0.134513
| 0.171903
| 0.92854
| 0.92854
| 0.918363
| 0.909956
| 0.909956
| 0.904204
| 0
| 0.03938
| 0.218439
| 7,343
| 265
| 94
| 27.709434
| 0.748214
| 0.063598
| 0
| 0.545455
| 0
| 0
| 0.031615
| 0
| 0
| 0
| 0
| 0
| 0.305785
| 1
| 0.305785
| false
| 0
| 0.024793
| 0
| 0.371901
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
10c5d813876825486bca15a36f2e36133fc93332
| 3,926
|
py
|
Python
|
datadelivery/test_commands.py
|
Duke-GCB/datadelivery-cli
|
ae34452590ea22d11859bc0d7df2d3245e30a342
|
[
"MIT"
] | null | null | null |
datadelivery/test_commands.py
|
Duke-GCB/datadelivery-cli
|
ae34452590ea22d11859bc0d7df2d3245e30a342
|
[
"MIT"
] | 4
|
2018-05-02T18:16:31.000Z
|
2018-05-18T13:54:44.000Z
|
datadelivery/test_commands.py
|
Duke-GCB/datadelivery-cli
|
ae34452590ea22d11859bc0d7df2d3245e30a342
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from unittest import TestCase
from mock import MagicMock, patch, call
from datadelivery.commands import Commands
from datadelivery.s3 import NotFoundException
class CommandsTestCase(TestCase):
    """Unit tests for Commands.deliver.

    Refactor: the three tests differed only in whether the bucket already
    existed and in the `resend` flag; the shared mock wiring and shared
    assertions are factored into helpers.
    """

    def setUp(self):
        self.config = MagicMock()

    def _wire_mocks(self, mock_s3, mock_config_file, bucket_exists=True):
        """Configure the patched S3/ConfigFile mocks.

        Returns (s3_object, to_user, bucket, delivery).  When
        ``bucket_exists`` is False, ``get_bucket_by_name`` raises
        NotFoundException and ``create_bucket`` returns the bucket instead.
        """
        mock_s3_object = mock_s3.return_value
        mock_to_user = MagicMock()
        mock_bucket = MagicMock()
        mock_delivery = MagicMock()
        mock_config_file.return_value.read_or_create_config.return_value = self.config
        mock_s3_object.get_s3user_by_email.return_value = mock_to_user
        if bucket_exists:
            mock_s3_object.get_bucket_by_name.return_value = mock_bucket
        else:
            mock_s3_object.get_bucket_by_name.side_effect = NotFoundException
            mock_s3_object.create_bucket.return_value = mock_bucket
        mock_s3_object.create_delivery.return_value = mock_delivery
        return mock_s3_object, mock_to_user, mock_bucket, mock_delivery

    def _run_deliver(self, resend):
        """Build Commands and invoke deliver with the canonical arguments."""
        commands = Commands(version_str='1.0')
        commands.deliver(bucket_name='some_bucket', email='joe@joe.com', user_message='Test',
                         resend=resend)

    def _assert_common_calls(self, mock_s3, s3_object, to_user, bucket, delivery, resend):
        """Assertions shared by every deliver scenario."""
        mock_s3.assert_called_with(self.config, user_agent_str='datadelivery/1.0')
        s3_object.get_s3user_by_email.assert_called_with('joe@joe.com')
        s3_object.get_bucket_by_name.assert_called_with('some_bucket')
        s3_object.create_delivery.assert_called_with(bucket, to_user, 'Test')
        s3_object.send_delivery.assert_called_with(delivery, resend)

    @patch('datadelivery.commands.ConfigFile')
    @patch('datadelivery.commands.S3')
    def test_deliver_bucket(self, mock_s3, mock_config_file):
        """Delivering an existing bucket sends the delivery without resend."""
        s3_object, to_user, bucket, delivery = self._wire_mocks(mock_s3, mock_config_file)
        self._run_deliver(resend=False)
        self._assert_common_calls(mock_s3, s3_object, to_user, bucket, delivery, False)

    @patch('datadelivery.commands.ConfigFile')
    @patch('datadelivery.commands.S3')
    def test_deliver_bucket_create_bucket(self, mock_s3, mock_config_file):
        """A missing bucket is created before the delivery is sent."""
        s3_object, to_user, bucket, delivery = self._wire_mocks(
            mock_s3, mock_config_file, bucket_exists=False)
        self._run_deliver(resend=False)
        self._assert_common_calls(mock_s3, s3_object, to_user, bucket, delivery, False)
        s3_object.create_bucket.assert_called_with('some_bucket')

    @patch('datadelivery.commands.ConfigFile')
    @patch('datadelivery.commands.S3')
    def test_deliver_bucket_resend(self, mock_s3, mock_config_file):
        """resend=True is forwarded to send_delivery."""
        s3_object, to_user, bucket, delivery = self._wire_mocks(mock_s3, mock_config_file)
        self._run_deliver(resend=True)
        self._assert_common_calls(mock_s3, s3_object, to_user, bucket, delivery, True)
| 50.987013
| 107
| 0.758533
| 547
| 3,926
| 4.983547
| 0.104205
| 0.077036
| 0.114453
| 0.066031
| 0.899486
| 0.888481
| 0.888481
| 0.888481
| 0.888481
| 0.888481
| 0
| 0.017174
| 0.15461
| 3,926
| 76
| 108
| 51.657895
| 0.804158
| 0
| 0
| 0.75
| 0
| 0
| 0.099873
| 0.042803
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.0625
| false
| 0
| 0.078125
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8009f8673bbe7bad372833b19acccfaa7e40080b
| 70
|
py
|
Python
|
pytracking/tracker/trdimp/__init__.py
|
594422814/TransformerTrack
|
e3bdd0be1a9a2cd4b1b6bb1b946a9a57a525b3fc
|
[
"MIT"
] | 215
|
2021-03-16T12:10:57.000Z
|
2022-03-30T03:02:36.000Z
|
pytracking/tracker/trdimp/__init__.py
|
YanWanquan/TransformerTrack
|
7b73e3830754fd4b32ba9bd99fb0e77ad92d1b63
|
[
"MIT"
] | 34
|
2021-03-24T08:18:32.000Z
|
2022-03-17T01:59:51.000Z
|
pytracking/tracker/trdimp/__init__.py
|
YanWanquan/TransformerTrack
|
7b73e3830754fd4b32ba9bd99fb0e77ad92d1b63
|
[
"MIT"
] | 37
|
2021-03-17T06:32:55.000Z
|
2022-03-28T07:03:14.000Z
|
from .trdimp import TrDiMP
def get_tracker_class():
    """Return the tracker class exported by this package (TrDiMP)."""
    return TrDiMP
| 17.5
| 26
| 0.771429
| 10
| 70
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 4
| 27
| 17.5
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
8039c54250d2976d67b83e270f74b20687d3646f
| 44,339
|
py
|
Python
|
models/layers.py
|
lim0606/pytorch-ardae-vae
|
52f460a90fa5822692031ab7dcca39fa9168988e
|
[
"MIT"
] | 11
|
2020-06-11T03:01:46.000Z
|
2021-06-17T02:59:39.000Z
|
models/layers.py
|
lim0606/pytorch-ardae-vae
|
52f460a90fa5822692031ab7dcca39fa9168988e
|
[
"MIT"
] | 1
|
2020-06-18T00:59:24.000Z
|
2020-06-19T22:55:14.000Z
|
models/layers.py
|
lim0606/pytorch-ardae-vae
|
52f460a90fa5822692031ab7dcca39fa9168988e
|
[
"MIT"
] | null | null | null |
'''
copied and modified from https://github.com/CW-Huang/torchkit/blob/33f61b914bf8e79faebab3d3d64c17ea921ce6d2/torchkit/nn.py
copied and modified from https://github.com/lim0606/pytorch-flows-dev/blob/master/flows.py
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import get_nonlinear_func
from torch.nn.modules.utils import _pair
'''
miscellanious layers
'''
class Identity(nn.Module):
    """No-op module whose forward pass returns its input unchanged."""

    def __init__(self,):
        super().__init__()

    def forward(self, input):
        # Pure pass-through; handy as a placeholder in sequential stacks.
        return input
'''
copied and modified from https://github.com/CW-Huang/torchkit/blob/master/nn.py
'''
class WeightNormalizedLinear(nn.Module):
def __init__(self, in_features, out_features, bias=True, mask=None, norm=True):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.register_buffer('mask',mask)
self.norm = norm
self.direction = nn.Parameter(torch.Tensor(out_features, in_features))
self.scale = nn.Parameter(torch.Tensor(out_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.direction.size(1))
self.direction.data.uniform_(-stdv, stdv)
self.scale.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
if self.norm:
dir_ = self.direction
direction = dir_.div(dir_.pow(2).sum(1).sqrt()[:,None])
weight = self.scale[:,None].mul(direction)
else:
weight = self.scale[:,None].mul(self.direction)
if self.mask is not None:
#weight = weight * getattr(self.mask,⋅
# ('cpu', 'cuda')[weight.is_cuda])()
weight = weight * self.mask
return F.linear(input, weight, self.bias)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) + ')'
class ResLinear(nn.Module):
    """Residual linear block: ``dot_h1(activation(dot_0h(x))) + skip(x)``.

    The skip connection is the identity when ``same_dim`` is True, otherwise
    a learned projection ``dot_01``.

    Bug fix: the original signature used mutable/shared default arguments
    (``activation=nn.ReLU()`` and ``oper_kwargs={'norm': False}``) — one ReLU
    module and one dict were shared by every instance created with defaults.
    Defaults are now None sentinels resolved per instance; behavior for all
    existing callers is unchanged (ReLU is stateless and the dict was only
    read).
    """

    def __init__(self,
                 in_features, out_features, bias=True, same_dim=False,
                 activation=None, oper=WeightNormalizedLinear, oper_kwargs=None):
        """
        :param activation: non-linearity module; defaults to a fresh nn.ReLU()
        :param oper: layer factory used for all linear sub-layers
        :param oper_kwargs: extra kwargs for ``oper``; defaults to {'norm': False}
        """
        super().__init__()
        if activation is None:
            activation = nn.ReLU()
        if oper_kwargs is None:
            oper_kwargs = {'norm': False}
        self.same_dim = same_dim
        self.dot_0h = oper(in_features, out_features, bias, **oper_kwargs)
        self.dot_h1 = oper(out_features, out_features, bias, **oper_kwargs)
        if not same_dim:
            # Projection for the skip path when input/output dims differ.
            self.dot_01 = oper(in_features, out_features, bias, **oper_kwargs)
        self.activation = activation

    def forward(self, input):
        h = self.activation(self.dot_0h(input))
        out_nonlinear = self.dot_h1(h)
        out_skip = input if self.same_dim else self.dot_01(input)
        return out_nonlinear + out_skip
class ContextResLinear(nn.Module):
    """Residual linear block with an additive context branch.

    Output is ``dot_h1(act(dot_0h(x))) + dot_c1(act(dot_0c(context))) + skip(x)``,
    where the skip path is the identity when ``same_dim`` is True and a learned
    projection ``dot_01`` otherwise.

    Bug fix: as in ResLinear, the mutable/shared default arguments
    (``activation=nn.ReLU()``, ``oper_kwargs={'norm': False}``) are replaced
    with per-instance None sentinels; existing callers see identical behavior.
    """

    def __init__(self,
                 in_features, out_features, context_features, bias=True, same_dim=False,
                 activation=None, oper=WeightNormalizedLinear, oper_kwargs=None):
        """
        :param context_features: dimensionality of the context input
        :param activation: non-linearity module; defaults to a fresh nn.ReLU()
        :param oper: layer factory used for all linear sub-layers
        :param oper_kwargs: extra kwargs for ``oper``; defaults to {'norm': False}
        """
        super().__init__()
        if activation is None:
            activation = nn.ReLU()
        if oper_kwargs is None:
            oper_kwargs = {'norm': False}
        self.same_dim = same_dim
        self.dot_0h = oper(in_features, out_features, bias, **oper_kwargs)
        self.dot_h1 = oper(out_features, out_features, bias, **oper_kwargs)
        if not same_dim:
            # Projection for the skip path when input/output dims differ.
            self.dot_01 = oper(in_features, out_features, bias, **oper_kwargs)
        self.dot_0c = oper(context_features, out_features, bias, **oper_kwargs)
        self.dot_c1 = oper(out_features, out_features, bias, **oper_kwargs)
        self.activation = activation

    def forward(self, input, context):
        h = self.activation(self.dot_0h(input))
        outi_nonlinear = self.dot_h1(h)
        c = self.activation(self.dot_0c(context))
        outc_nonlinear = self.dot_c1(c)
        out_skip = input if self.same_dim else self.dot_01(input)
        return outi_nonlinear + outc_nonlinear + out_skip
''' context '''
class ContextLinear(nn.Module):
    """Linear layer modulated by a context vector.

    out = (1 + cscale(ctx)) * (x @ direction^T) + cbias(ctx)
    """
    __constants__ = ['bias', 'in_features', 'out_features']

    def __init__(self, in_features, out_features, context_features, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.context_features = context_features
        self.direction = nn.Parameter(torch.Tensor(out_features, in_features))
        self.cscale = nn.Linear(context_features, out_features, bias=False)
        self.cbias = nn.Linear(context_features, out_features, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        # Kaiming-init the shared direction; start the context scale near 0
        # so the layer initially behaves like a plain linear map.
        torch.nn.init.kaiming_uniform_(self.direction, a=math.sqrt(5))
        self.cscale.weight.data.normal_(0, 0.005)

    def forward(self, input, context):
        gain = 1. + self.cscale(context)
        shift = self.cbias(context)
        projected = F.linear(input, self.direction, None)
        return gain * projected + shift

    def extra_repr(self):
        return 'in_features={}, out_features={}, context_features={}'.format(
            self.in_features, self.out_features, self.context_features,
        )
class ContextConv2d(nn.Module):
    """Conv2d modulated by a context feature map.

    out = (1 + cscale(ctx)) * direction(x) + cbias(ctx)

    All three convolutions share the same geometry (kernel/stride/padding/
    dilation/groups).  ``padding_mode`` is accepted for signature
    compatibility but not forwarded (as in the original).
    """
    def __init__(self,
                 in_channels, out_channels, context_channels,
                 kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.context_channels = context_channels
        geometry = dict(kernel_size=kernel_size, stride=stride,
                        padding=padding, dilation=dilation, groups=groups)
        self.direction = nn.Conv2d(in_channels, out_channels, bias=False, **geometry)
        self.cscale = nn.Conv2d(context_channels, out_channels, bias=False, **geometry)
        self.cbias = nn.Conv2d(context_channels, out_channels, bias=bias, **geometry)
        self.reset_parameters()

    def reset_parameters(self):
        # only the context-scale weights get a custom near-zero init;
        # direction/cbias keep the Conv2d default initialization
        self.cscale.weight.data.normal_(0, 0.005)

    def forward(self, input, context):
        gain = 1. + self.cscale(context)
        shift = self.cbias(context)
        return gain * self.direction(input) + shift

    def extra_repr(self):
        return 'in_channels={}, out_channels={}, context_channels={}'.format(
            self.in_channels, self.out_channels, self.context_channels,
        )
class ContextWeightNormalizedLinear(nn.Module):
    """Context-modulated linear layer with optional weight normalization.

    out = (1 + s(ctx)) * (x @ W^T) + cbias(ctx), where s is a linear map of
    the context (row-normalized and damped by ``ctx_scale`` when
    ``ctx_norm``), and W is ``direction`` (row-normalized when ``in_norm``).
    """
    def __init__(self, in_features, out_features, context_features, bias=True, in_norm=False, ctx_norm=True, ctx_scale=0.1):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.context_features = context_features
        self.in_norm = in_norm
        self.ctx_norm = ctx_norm
        self.ctx_scale = ctx_scale
        self.direction = nn.Parameter(torch.Tensor(out_features, in_features))
        self.cscale = nn.Parameter(torch.Tensor(out_features, context_features))
        self.cbias = nn.Linear(context_features, out_features, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.direction, a=math.sqrt(5))
        # near-zero context scale: layer starts out close to a plain linear map
        self.cscale.data.normal_(0, 0.005)

    @staticmethod
    def _row_normalize(w):
        # rescale each row of w to unit L2 norm
        return w.div(w.pow(2).sum(1).sqrt()[:, None])

    def forward(self, input, context):
        shift = self.cbias(context)
        if self.ctx_norm:
            gain = 1. + self.ctx_scale * F.linear(context, self._row_normalize(self.cscale), None)
        else:
            gain = 1. + F.linear(context, self.cscale, None)
        weight = self._row_normalize(self.direction) if self.in_norm else self.direction
        return gain * F.linear(input, weight, None) + shift

    def extra_repr(self):
        return 'in_features={}, out_features={}, context_features={}, in_norm={}, ctx_norm={}'.format(
            self.in_features, self.out_features, self.context_features, self.in_norm, self.ctx_norm
        )
''' context (softplus) '''
class ContextSoftPlusLinear(nn.Module):
    """Context-modulated linear layer with a strictly positive scale.

    out = softplus(cscale(ctx)) * (x @ direction^T) + cbias(ctx)
    """
    __constants__ = ['bias', 'in_features', 'out_features']

    def __init__(self, in_features, out_features, context_features, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.context_features = context_features
        self.direction = nn.Parameter(torch.Tensor(out_features, in_features))
        self.cscale = nn.Linear(context_features, out_features, bias=True)
        self.cbias = nn.Linear(context_features, out_features, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.direction, a=math.sqrt(5))
        # near-zero weights + small bias keep the initial softplus scale
        # close to softplus(0) = log 2
        self.cscale.weight.data.normal_(0, 0.005)
        fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.cscale.weight)
        bound = 1 / math.sqrt(fan_in)
        torch.nn.init.uniform_(self.cscale.bias, -bound, bound)

    def forward(self, input, context):
        gain = F.softplus(self.cscale(context))
        shift = self.cbias(context)
        return gain * F.linear(input, self.direction, None) + shift

    def extra_repr(self):
        return 'in_features={}, out_features={}, context_features={}'.format(
            self.in_features, self.out_features, self.context_features,
        )
class ContextSoftPlusConv2d(nn.Module):
    """Conv2d with a strictly positive, context-dependent scale.

    out = softplus(cscale(ctx)) * direction(x) + cbias(ctx)

    All three convolutions share the same geometry; ``padding_mode`` is
    accepted for signature compatibility but not forwarded (as in the
    original).
    """
    def __init__(self,
                 in_channels, out_channels, context_channels,
                 kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.context_channels = context_channels
        geometry = dict(kernel_size=kernel_size, stride=stride,
                        padding=padding, dilation=dilation, groups=groups)
        self.direction = nn.Conv2d(in_channels, out_channels, bias=False, **geometry)
        self.cscale = nn.Conv2d(context_channels, out_channels, bias=True, **geometry)
        self.cbias = nn.Conv2d(context_channels, out_channels, bias=bias, **geometry)
        self.reset_parameters()

    def reset_parameters(self):
        # near-zero cscale weights + small bias keep the initial softplus
        # scale close to softplus(0) = log 2
        self.cscale.weight.data.normal_(0, 0.005)
        fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.cscale.weight)
        bound = 1 / math.sqrt(fan_in)
        torch.nn.init.uniform_(self.cscale.bias, -bound, bound)

    def forward(self, input, context):
        gain = F.softplus(self.cscale(context))
        shift = self.cbias(context)
        return gain * self.direction(input) + shift

    def extra_repr(self):
        return 'in_channels={}, out_channels={}, context_channels={}'.format(
            self.in_channels, self.out_channels, self.context_channels,
        )
class ContextSoftPlusWeightNormalizedLinear(nn.Module):
    """Linear layer with a strictly positive context-dependent scale and
    optional weight normalization.

    out = softplus(ctx @ cscale^T + cscalebias) * (x @ W^T) + cbias(ctx),
    where ``in_norm``/``ctx_norm`` row-normalize W / cscale respectively.
    """
    def __init__(self, in_features, out_features, context_features, bias=True, in_norm=False, ctx_norm=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.context_features = context_features
        self.in_norm = in_norm
        self.ctx_norm = ctx_norm
        self.direction = nn.Parameter(torch.Tensor(out_features, in_features))
        self.cscale = nn.Parameter(torch.Tensor(out_features, context_features))
        self.cscalebias = nn.Parameter(torch.Tensor(out_features))
        self.cbias = nn.Linear(context_features, out_features, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.kaiming_uniform_(self.direction, a=math.sqrt(5))
        self.cscale.data.normal_(0, 1)
        fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.cscale)
        bound = 1 / math.sqrt(fan_in)
        torch.nn.init.uniform_(self.cscalebias, -bound, bound)

    @staticmethod
    def _row_normalize(w):
        # rescale each row of w to unit L2 norm
        return w.div(w.pow(2).sum(1).sqrt()[:, None])

    def forward(self, input, context):
        shift = self.cbias(context)
        cw = self._row_normalize(self.cscale) if self.ctx_norm else self.cscale
        gain = F.softplus(F.linear(context, cw, self.cscalebias))
        weight = self._row_normalize(self.direction) if self.in_norm else self.direction
        return gain * F.linear(input, weight, None) + shift

    def extra_repr(self):
        return 'in_features={}, out_features={}, context_features={}, in_norm={}, ctx_norm={}'.format(
            self.in_features, self.out_features, self.context_features, self.in_norm, self.ctx_norm
        )
class ContextSoftPlusWeightNormalizedConv2d(nn.Module):
    """Conv2d with a strictly positive, context-dependent scale:
    out = softplus(conv(ctx, cscale) + cscalebias) * conv(x, direction) + cbias(ctx).

    Optional per-output-channel weight normalization on the input weight
    (``in_norm``) and the context-scale weight (``ctx_norm``).  Kernels are
    assumed square (``kernel_size`` is a single int).
    """
    __constants__ = ['stride', 'padding', 'dilation', 'groups', 'bias',
                     'in_channels', 'out_channels', 'context_channels', 'kernel_size']
    def __init__(self,
                 in_channels, out_channels, context_channels,
                 kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, in_norm=False, ctx_norm=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.context_channels = context_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias  # bool flag, not the bias tensor itself
        self.in_norm = in_norm
        self.ctx_norm = ctx_norm
        self.direction = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
        self.cscale = nn.Parameter(torch.Tensor(out_channels, context_channels, kernel_size, kernel_size))
        self.cscalebias = nn.Parameter(torch.Tensor(out_channels))
        self.cbias = nn.Conv2d(context_channels, out_channels, bias=bias, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups)
        self.reset_parameters()
    def reset_parameters(self):
        """Kaiming-init direction; unit-normal cscale; small cscalebias."""
        torch.nn.init.kaiming_uniform_(self.direction, a=math.sqrt(5))
        self.cscale.data.normal_(0, 1)
        fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.cscale)
        bound = 1 / math.sqrt(fan_in)
        torch.nn.init.uniform_(self.cscalebias, -bound, bound)
    def forward(self, input, context):
        bias = self.cbias(context)
        if self.ctx_norm:
            # normalize each output-channel filter of cscale to unit L2 norm
            cscale_ = self.cscale
            cscale = cscale_.div(cscale_.pow(2).sum(1).sum(1).sum(1).sqrt()[:,None,None,None])
            scale = F.softplus(F.conv2d(context, cscale, bias=self.cscalebias,
                stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups))
        else:
            scale = F.softplus(F.conv2d(context, self.cscale, bias=self.cscalebias,
                stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups))
        if self.in_norm:
            dir_ = self.direction
            weight = dir_.div(dir_.pow(2).sum(1).sum(1).sum(1).sqrt()[:,None,None,None])
        else:
            weight = self.direction
        out = F.conv2d(input, weight, bias=None,
            stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
        return scale * out + bias
    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, {context_channels}, in_norm={in_norm}, ctx_norm={ctx_norm}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != 0:
            s += ', padding={padding}'
        if self.dilation != 1:
            s += ', dilation={dilation}'
        if self.groups != 1:
            s += ', groups={groups}'
        # bug fix: self.bias holds the bool constructor flag, so it is never
        # None -- the old `is None` test meant bias=False was never reported
        if not self.bias:
            s += ', bias=False'
        return s.format(**self.__dict__)
''' bilinear '''
class SimplifiedBilinear(nn.Module):
    """Additive "bilinear" layer: out = path1(x1) + path2(x2).

    The bias lives only on ``path1`` so the two paths do not double-count it.
    """
    def __init__(self, in1_features, in2_features, out_features, bias=True):
        super().__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.path1 = nn.Linear(in1_features, out_features, bias=bias)
        self.path2 = nn.Linear(in2_features, out_features, bias=False)

    def reset_parameters(self):
        """Re-initialize both sub-layers.

        Fix: ContextBilinearMLP.reset_parameters calls this method, but it
        was previously missing, raising AttributeError.
        """
        self.path1.reset_parameters()
        self.path2.reset_parameters()

    def forward(self, input1, input2):
        return self.path1(input1) + self.path2(input2)

    def extra_repr(self):
        return 'in1_features={}, in2_features={}, out_features={}'.format(
            self.in1_features, self.in2_features, self.out_features,
        )
class WeightNormalizedSimplifiedBilinear(nn.Module):
    """Two-input additive layer with optional per-row weight normalization.

    out = x1 @ W1^T + x2 @ W2^T + bias, where W1/W2 are row-normalized when
    ``in1_norm``/``in2_norm`` are set.
    """
    def __init__(self, in1_features, in2_features, out_features, bias=True, in1_norm=False, in2_norm=True):
        super().__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.in1_norm = in1_norm
        self.in2_norm = in2_norm
        self.path1 = nn.Parameter(torch.Tensor(out_features, in1_features))
        self.path2 = nn.Parameter(torch.Tensor(out_features, in2_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        for w in (self.path1, self.path2):
            torch.nn.init.kaiming_uniform_(w, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.path1)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)

    @staticmethod
    def _row_normalize(w):
        # rescale each row of w to unit L2 norm
        return w.div(w.pow(2).sum(1).sqrt()[:, None])

    def forward(self, input1, input2):
        w1 = self._row_normalize(self.path1) if self.in1_norm else self.path1
        w2 = self._row_normalize(self.path2) if self.in2_norm else self.path2
        return F.linear(input1, w1, self.bias) + F.linear(input2, w2, None)

    def extra_repr(self):
        return 'in1_features={}, in2_features={}, out_features={}, in1_norm={}, in2_norm={}'.format(
            self.in1_features, self.in2_features, self.out_features, self.in1_norm, self.in2_norm
        )
class StackedWeightNormalizedSimplifiedBilinear(nn.Module):
    """WeightNormalizedSimplifiedBilinear -> nonlinearity -> Linear.

    Bug fix: the original forwarded ``norm=norm`` to
    WeightNormalizedSimplifiedBilinear, whose __init__ has no ``norm``
    parameter (only ``in1_norm``/``in2_norm``), so construction always
    raised TypeError.  ``norm`` now drives both flags.
    NOTE(review): mapping norm -> (in1_norm, in2_norm) is an assumption;
    confirm the intended flag(s) with the author.
    """
    def __init__(self, in1_features, in2_features, hid_features, out_features, bias=True, norm=True, nonlinearity='relu'):
        super().__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.hid_features = hid_features
        self.out_features = out_features
        self.norm = norm
        self.nonlinearity = nonlinearity
        self.main = WeightNormalizedSimplifiedBilinear(
            in1_features, in2_features, hid_features, bias=bias,
            in1_norm=norm, in2_norm=norm)
        self.fc = nn.Linear(hid_features, out_features)

    def reset_parameters(self):
        """Re-init sub-layers (ContextSWNBilinearMLP.reset_parameters calls this)."""
        self.main.reset_parameters()
        self.fc.reset_parameters()

    def forward(self, input1, input2):
        afunc = get_nonlinear_func(self.nonlinearity)
        hid = afunc(self.main(input1, input2))
        out = self.fc(hid)
        return out
''' MLP '''
class MLP(nn.Module):
    """Plain fully connected network.

    ``num_hidden_layers`` hidden Linear layers, each followed by the chosen
    pointwise nonlinearity, then a final Linear layer (optionally also
    passed through the nonlinearity).  The input is flattened to
    ``(batch, input_dim)`` first.
    """
    def __init__(self,
                 input_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=1,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            nn.Linear(input_dim if i == 0 else hidden_dim, hidden_dim)
            for i in range(num_hidden_layers))
        fc_in = input_dim if num_hidden_layers == 0 else hidden_dim
        self.fc = nn.Linear(fc_in, output_dim)

    def forward(self, input):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h))
        out = self.fc(h)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class WNMLP(nn.Module):
    """MLP built from WeightNormalizedLinear layers.

    Hidden layers use the default normalization; the output layer's
    normalization is controlled by ``use_norm_output``.
    """
    def __init__(self,
                 input_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=1,
                 use_nonlinearity_output=False,
                 use_norm_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.use_norm_output = use_norm_output
        self.layers = nn.ModuleList(
            WeightNormalizedLinear(input_dim if i == 0 else hidden_dim, hidden_dim)
            for i in range(num_hidden_layers))
        self.fc = WeightNormalizedLinear(
            input_dim if num_hidden_layers == 0 else hidden_dim,
            output_dim, norm=use_norm_output)

    def forward(self, input):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h))
        out = self.fc(h)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ResMLP(nn.Module):
    """MLP built from ResLinear residual blocks.

    ``layer`` selects the inner operator: 'linear' for nn.Linear or
    'wnlinear' for WeightNormalizedLinear (with ``use_norm``).
    """
    def __init__(self,
                 input_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=1,
                 use_nonlinearity_output=False,
                 layer='wnlinear',
                 use_norm=False,
                 use_norm_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layer = layer
        self.use_norm = use_norm
        self.use_norm_output = use_norm_output
        if self.layer == 'linear':
            oper, oper_kwargs = nn.Linear, {}
        elif self.layer == 'wnlinear':
            oper, oper_kwargs = WeightNormalizedLinear, {'norm': use_norm}
        else:
            raise NotImplementedError
        blocks = []
        prev_dim = input_dim
        for i in range(num_hidden_layers):
            blocks.append(ResLinear(input_dim if i == 0 else hidden_dim,
                                    hidden_dim,
                                    same_dim=prev_dim == hidden_dim,
                                    oper=oper,
                                    oper_kwargs=oper_kwargs))
            prev_dim = hidden_dim
        self.layers = nn.ModuleList(blocks)
        self.fc = ResLinear(input_dim if num_hidden_layers == 0 else hidden_dim,
                            output_dim,
                            same_dim=prev_dim == output_dim,
                            oper=oper,
                            oper_kwargs=oper_kwargs)

    def forward(self, input):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        for blk in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(blk(h))
        out = self.fc(h)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextResMLP(nn.Module):
    """MLP of ContextResLinear residual blocks conditioned on a context.

    Hidden blocks use ``use_norm``; the output block uses
    ``use_norm_output``.
    """
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=1,
                 use_nonlinearity_output=False,
                 use_norm=False,
                 use_norm_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.use_norm = use_norm
        self.use_norm_output = use_norm_output
        blocks = []
        prev_dim = input_dim
        for i in range(num_hidden_layers):
            blocks.append(ContextResLinear(input_dim if i == 0 else hidden_dim,
                                           hidden_dim,
                                           context_dim,
                                           same_dim=prev_dim == hidden_dim,
                                           oper_kwargs={'norm': use_norm}))
            prev_dim = hidden_dim
        self.layers = nn.ModuleList(blocks)
        self.fc = ContextResLinear(input_dim if num_hidden_layers == 0 else hidden_dim,
                                   output_dim,
                                   context_dim,
                                   same_dim=prev_dim == output_dim,
                                   oper_kwargs={'norm': use_norm_output})

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for blk in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(blk(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextConcatMLP(nn.Module):
    """MLP that conditions on a context by concatenation.

    The context vector is concatenated to the features before every Linear
    layer, including the output layer.
    """
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=1,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            nn.Linear((input_dim if i == 0 else hidden_dim) + context_dim, hidden_dim)
            for i in range(num_hidden_layers))
        fc_in = (input_dim if num_hidden_layers == 0 else hidden_dim) + context_dim
        self.fc = nn.Linear(fc_in, output_dim)

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(torch.cat([h, c], dim=1)))
        out = self.fc(torch.cat([h, c], dim=1))
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextScaleMLP(nn.Module):
    """MLP of ContextLinear layers: every layer's scale and bias are
    produced from the context vector."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            ContextLinear(in_features=input_dim if i == 0 else hidden_dim,
                          out_features=hidden_dim,
                          context_features=context_dim)
            for i in range(num_hidden_layers))
        self.fc = ContextLinear(
            in_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            out_features=output_dim,
            context_features=context_dim)

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextWNScaleMLP(nn.Module):
    """MLP of ContextWeightNormalizedLinear layers (context-dependent scale
    with weight normalization, default flags)."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            ContextWeightNormalizedLinear(
                in_features=input_dim if i == 0 else hidden_dim,
                out_features=hidden_dim,
                context_features=context_dim)
            for i in range(num_hidden_layers))
        self.fc = ContextWeightNormalizedLinear(
            in_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            out_features=output_dim,
            context_features=context_dim)

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextSPScaleMLP(nn.Module):
    """MLP of ContextSoftPlusLinear layers (strictly positive
    context-dependent scale via softplus)."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            ContextSoftPlusLinear(
                in_features=input_dim if i == 0 else hidden_dim,
                out_features=hidden_dim,
                context_features=context_dim)
            for i in range(num_hidden_layers))
        self.fc = ContextSoftPlusLinear(
            in_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            out_features=output_dim,
            context_features=context_dim)

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextSPWNScaleMLP(nn.Module):
    """MLP of ContextSoftPlusWeightNormalizedLinear layers (softplus
    context scale with weight normalization, default flags)."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            ContextSoftPlusWeightNormalizedLinear(
                in_features=input_dim if i == 0 else hidden_dim,
                out_features=hidden_dim,
                context_features=context_dim)
            for i in range(num_hidden_layers))
        self.fc = ContextSoftPlusWeightNormalizedLinear(
            in_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            out_features=output_dim,
            context_features=context_dim)

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextBilinearMLP(nn.Module):
    """MLP of SimplifiedBilinear layers: each layer adds a linear map of the
    context to a linear map of the features."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            SimplifiedBilinear(
                in1_features=input_dim if i == 0 else hidden_dim,
                in2_features=context_dim,
                out_features=hidden_dim,
            )
            for i in range(num_hidden_layers))
        self.fc = SimplifiedBilinear(
            in1_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            in2_features=context_dim,
            out_features=output_dim,
        )

    def reset_parameters(self):
        # NOTE(review): relies on SimplifiedBilinear providing a
        # reset_parameters method.
        for layer in self.layers:
            layer.reset_parameters()
        self.fc.reset_parameters()

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextWNBilinearMLP(nn.Module):
    """MLP of WeightNormalizedSimplifiedBilinear layers (default
    normalization flags)."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            WeightNormalizedSimplifiedBilinear(
                in1_features=input_dim if i == 0 else hidden_dim,
                in2_features=context_dim,
                out_features=hidden_dim,
            )
            for i in range(num_hidden_layers))
        self.fc = WeightNormalizedSimplifiedBilinear(
            in1_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            in2_features=context_dim,
            out_features=output_dim,
        )

    def reset_parameters(self):
        for layer in self.layers:
            layer.reset_parameters()
        self.fc.reset_parameters()

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
class ContextSWNBilinearMLP(nn.Module):
    """MLP of StackedWeightNormalizedSimplifiedBilinear blocks (each block
    has its own hidden layer of ``hidden_dim`` units)."""
    def __init__(self,
                 input_dim=2,
                 context_dim=2,
                 hidden_dim=8,
                 output_dim=2,
                 nonlinearity='relu',
                 num_hidden_layers=3,
                 use_nonlinearity_output=False,
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.nonlinearity = nonlinearity
        self.num_hidden_layers = num_hidden_layers
        self.use_nonlinearity_output = use_nonlinearity_output
        self.layers = nn.ModuleList(
            StackedWeightNormalizedSimplifiedBilinear(
                in1_features=input_dim if i == 0 else hidden_dim,
                in2_features=context_dim,
                hid_features=hidden_dim,
                out_features=hidden_dim,
            )
            for i in range(num_hidden_layers))
        self.fc = StackedWeightNormalizedSimplifiedBilinear(
            in1_features=input_dim if num_hidden_layers == 0 else hidden_dim,
            in2_features=context_dim,
            hid_features=hidden_dim,
            out_features=output_dim,
        )

    def reset_parameters(self):
        # NOTE(review): relies on StackedWeightNormalizedSimplifiedBilinear
        # providing a reset_parameters method.
        for layer in self.layers:
            layer.reset_parameters()
        self.fc.reset_parameters()

    def forward(self, input, context):
        n = input.size(0)
        h = input.view(n, self.input_dim)
        c = context.view(n, self.context_dim)
        for layer in self.layers:
            h = get_nonlinear_func(self.nonlinearity)(layer(h, c))
        out = self.fc(h, c)
        if self.use_nonlinearity_output:
            out = get_nonlinear_func(self.nonlinearity)(out)
        return out
| 40.271571
| 195
| 0.614312
| 5,303
| 44,339
| 4.846313
| 0.036583
| 0.033619
| 0.056031
| 0.026615
| 0.896459
| 0.876226
| 0.840506
| 0.824202
| 0.81751
| 0.808249
| 0
| 0.013245
| 0.28652
| 44,339
| 1,100
| 196
| 40.308182
| 0.799115
| 0.050001
| 0
| 0.79
| 0
| 0.001111
| 0.023124
| 0.000597
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082222
| false
| 0
| 0.006667
| 0.012222
| 0.161111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
80412fa824cbdb5851b127ac81d8e82b68ae617a
| 5,673
|
py
|
Python
|
testing/sudoku/problems.py
|
BCCN-Prog/materials
|
4317ab52521093cc84c33b41ab027b46d1e5e48a
|
[
"MIT"
] | null | null | null |
testing/sudoku/problems.py
|
BCCN-Prog/materials
|
4317ab52521093cc84c33b41ab027b46d1e5e48a
|
[
"MIT"
] | null | null | null |
testing/sudoku/problems.py
|
BCCN-Prog/materials
|
4317ab52521093cc84c33b41ab027b46d1e5e48a
|
[
"MIT"
] | null | null | null |
"""Example Sudoku problems and solutions."""
# keys of problems that are easy to solve by brute force
# used by the tests
TEST_KEYS = ['easy1', 'hard1', 'hard2', 'swordfish1']
# ##### Example Sudoku problems
# Notes:
# 1) 'swordfish1' requires the complicated swordfish manoeuver
# http://www.sudokuoftheday.com/pages/techniques-9.php
# 2) it takes a *long* time to 'minimal1' or 'minimal2' with
# my brute-force solver
sudoku_problems = {'easy1': [[0,0,3,7,0,0,0,5,0],
[0,7,0,0,5,0,8,0,0],
[1,0,0,0,0,6,0,0,4],
[5,0,2,0,0,0,0,0,0],
[8,0,0,9,0,4,0,0,6],
[0,0,0,0,0,0,9,0,2],
[3,0,0,5,0,0,0,0,7],
[0,0,4,0,9,0,0,6,0],
[0,2,0,0,0,7,4,0,0]],
'hard1': [[0,0,0,0,5,8,0,0,9],
[5,0,8,3,0,0,0,0,6],
[0,0,3,4,0,0,0,0,0],
[7,0,0,0,0,4,3,5,0],
[8,0,0,0,0,0,0,0,2],
[0,4,1,5,0,0,0,0,8],
[0,0,0,0,0,3,8,7,0],
[0,0,0,0,0,5,0,0,0],
[3,2,0,8,1,0,0,6,0]],
'hard2': [[5,0,1,2,8,0,0,0,0],
[8,0,0,0,0,0,7,0,2],
[2,0,0,0,0,0,1,8,5],
[0,1,4,7,0,0,5,0,0],
[0,0,0,4,0,0,0,2,0],
[0,2,6,0,0,0,0,0,0],
[1,0,0,0,3,6,0,0,0],
[4,0,0,0,0,0,0,5,1],
[6,0,0,0,4,1,0,0,0]],
'minimal1':
[[0,0,0,0,0,0,0,1,0],
[4,0,0,0,0,0,0,0,0],
[0,2,0,0,0,0,0,0,0],
[0,0,0,0,5,0,4,0,7],
[0,0,8,0,0,0,3,0,0],
[0,0,1,0,9,0,0,0,0],
[3,0,0,4,0,0,2,0,0],
[0,5,0,1,0,0,0,0,0],
[0,0,0,8,0,6,0,0,0]],
'minimal2':
[[2,0,0,4,0,8,0,0,0],
[1,0,0,0,0,0,0,3,0],
[0,0,0,0,0,0,0,0,0],
[0,6,0,0,4,0,0,0,0],
[0,0,0,2,0,0,0,5,0],
[0,8,5,0,0,0,0,0,0],
[0,0,0,1,0,0,2,0,0],
[7,0,0,3,0,0,0,0,0],
[0,0,0,0,0,0,5,0,8]],
'swordfish1':
[[0,0,0,4,7,0,6,0,0],
[0,0,4,0,0,0,3,0,5],
[9,2,0,0,0,0,0,0,0],
[0,3,1,0,0,0,0,0,0],
[0,0,0,9,3,6,0,0,0],
[0,0,0,0,0,0,2,8,0],
[0,0,0,0,0,0,0,1,6],
[4,0,8,0,0,0,9,0,0],
[0,0,7,0,5,2,0,0,0]]
}
# ##### Solutions to problems
sudoku_solutions = {'easy1': [[4,8,3,7,1,2,6,5,9],
[2,7,6,4,5,9,8,1,3],
[1,5,9,8,3,6,7,2,4],
[5,9,2,6,7,3,1,4,8],
[8,3,1,9,2,4,5,7,6],
[6,4,7,1,8,5,9,3,2],
[3,6,8,5,4,1,2,9,7],
[7,1,4,2,9,8,3,6,5],
[9,2,5,3,6,7,4,8,1 ]],
'hard1': [[4,6,2,7,5,8,1,3,9],
[5,7,8,3,9,1,4,2,6],
[9,1,3,4,6,2,5,8,7],
[7,9,6,2,8,4,3,5,1],
[8,3,5,1,7,9,6,4,2],
[2,4,1,5,3,6,7,9,8],
[1,5,9,6,2,3,8,7,4],
[6,8,7,9,4,5,2,1,3],
[3,2,4,8,1,7,9,6,5]],
'hard2': [[5,7,1,2,8,4,9,6,3],
[8,6,3,1,9,5,7,4,2],
[2,4,9,6,7,3,1,8,5],
[3,1,4,7,6,2,5,9,8],
[7,8,5,4,1,9,3,2,6],
[9,2,6,3,5,8,4,1,7],
[1,9,2,5,3,6,8,7,4],
[4,3,8,9,2,7,6,5,1],
[6,5,7,8,4,1,2,3,9]],
'minimal1':
[[6,9,3,7,8,4,5,1,2],
[4,8,7,5,1,2,9,3,6],
[1,2,5,9,6,3,8,7,4],
[9,3,2,6,5,1,4,8,7],
[5,6,8,2,4,7,3,9,1],
[7,4,1,3,9,8,6,2,5],
[3,1,9,4,7,5,2,6,8],
[8,5,6,1,2,9,7,4,3],
[2,7,4,8,3,6,1,5,9]],
'swordfish1':
[[3,1,5,4,7,9,6,2,8],
[7,8,4,2,6,1,3,9,5],
[9,2,6,5,8,3,1,4,7],
[5,3,1,7,2,8,4,6,9],
[8,4,2,9,3,6,5,7,1],
[6,7,9,1,4,5,2,8,3],
[2,5,3,8,9,4,7,1,6],
[4,6,8,3,1,7,9,5,2],
[1,9,7,6,5,2,8,3,4]]
}
| 45.75
| 62
| 0.232505
| 969
| 5,673
| 1.358101
| 0.057792
| 0.370821
| 0.378419
| 0.346505
| 0.329027
| 0.280395
| 0.238602
| 0.193009
| 0.139058
| 0.071429
| 0
| 0.360759
| 0.55438
| 5,673
| 123
| 63
| 46.121951
| 0.15981
| 0.064869
| 0
| 0.037383
| 0
| 0
| 0.018746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
33bbb7d221cf2f972e03f9f783e83566dd551e2a
| 156
|
py
|
Python
|
logical_permissions/exceptions/PermissionTypeNotRegisteredException.py
|
ordermind/logical-permissions-py
|
3a64ad49ad1d4c2e471746456e88deb554683067
|
[
"MIT"
] | 1
|
2016-01-04T17:28:35.000Z
|
2016-01-04T17:28:35.000Z
|
logical_permissions/exceptions/PermissionTypeNotRegisteredException.py
|
ordermind/logical-permissions-py
|
3a64ad49ad1d4c2e471746456e88deb554683067
|
[
"MIT"
] | null | null | null |
logical_permissions/exceptions/PermissionTypeNotRegisteredException.py
|
ordermind/logical-permissions-py
|
3a64ad49ad1d4c2e471746456e88deb554683067
|
[
"MIT"
] | null | null | null |
from logical_permissions.exceptions import InvalidArgumentValueException
class PermissionTypeNotRegisteredException(InvalidArgumentValueException):
    """Marker exception for unregistered permission types.

    Subclasses InvalidArgumentValueException, so callers catching the base
    class also catch this more specific error.
    """
    pass
| 31.2
| 74
| 0.910256
| 10
| 156
| 14.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064103
| 156
| 4
| 75
| 39
| 0.965753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
33d4c27702f628ca271596f4794dce5f82a3a55f
| 1,751
|
py
|
Python
|
pkpgcounter-3.50/build/lib.linux-x86_64-2.7/pkpgpdls/version.py
|
philips558/PPS
|
2960336da8e19723879bfb15623563f2bde69f01
|
[
"CC0-1.0"
] | null | null | null |
pkpgcounter-3.50/build/lib.linux-x86_64-2.7/pkpgpdls/version.py
|
philips558/PPS
|
2960336da8e19723879bfb15623563f2bde69f01
|
[
"CC0-1.0"
] | 3
|
2020-02-06T12:47:26.000Z
|
2020-02-09T18:47:02.000Z
|
pkpgcounter-3.50/pkpgpdls/version.py
|
philips558/PPS
|
2960336da8e19723879bfb15623563f2bde69f01
|
[
"CC0-1.0"
] | null | null | null |
#
# pkpgcounter : a generic Page Description Language parser
#
# (c) 2003, 2004, 2005, 2006, 2007 Jerome Alet <alet@librelogiciel.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: version.py 376 2007-12-09 20:32:26Z jerome $
#
"""This modules defines some important constants used in this software."""
__version__ = "3.50"
__doc__ = """pkpgcounter : a generic Page Description Languages parser."""
__author__ = "Jerome Alet"
__authoremail__ = "alet@librelogiciel.com"
__years__ = "2003, 2004, 2005, 2006, 2007"
__gplblurb__ = """This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
| 38.911111
| 86
| 0.759566
| 272
| 1,751
| 4.801471
| 0.375
| 0.050536
| 0.059724
| 0.087289
| 0.826953
| 0.744257
| 0.744257
| 0.744257
| 0.744257
| 0.744257
| 0
| 0.042582
| 0.168475
| 1,751
| 44
| 87
| 39.795455
| 0.854396
| 0.490006
| 0
| 0
| 0
| 0
| 0.845622
| 0.025346
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d51374492311f2e5936724698d5451d98e6c77c8
| 11,149
|
py
|
Python
|
pkgs/conf-pkg/src/genie/libs/conf/static_routing/nxos/tests/test_static_routing.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/static_routing/nxos/tests/test_static_routing.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/static_routing/nxos/tests/test_static_routing.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
#!/usr/bin/env python
#python
import unittest
from unittest.mock import Mock
# Genie package
from genie.tests.conf import TestCase
from genie.conf import Genie
from genie.conf.base import Testbed, Device, Link, Interface
# Genie XBu_shared
from genie.libs.conf.static_routing.static_routing import StaticRouting
class test_static_routing(TestCase):
    """Unit tests for NXOS StaticRouting config/unconfig generation.

    Each test builds a StaticRouting feature on a mock 'nxos' device and
    asserts the exact CLI text produced by build_config / build_unconfig.
    """

    def test_static_routing_with_interface_cfg(self):
        # IPv4 route with an egress interface, first under a named VRF,
        # then under the default VRF (no 'vrf context' wrapper expected).
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.interface = 'Ethernet0/1'
        static_routing.vrf = 'VRF1'
        static_routing.af = 'ipv4'
        static_routing.route = '10.2.1.0/24'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nexthop = '192.168.1.2'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_preference = 2
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nh_vrf = 'VRF1'
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join(
            ['vrf context VRF1',
             ' ip route 10.2.1.0/24 Ethernet0/1 192.168.1.2 vrf VRF1 2',
             ' exit',
             ]))
        # Same route under the default VRF: route command is emitted at
        # top level, without a 'vrf context' block.
        static_routing = StaticRouting()
        static_routing.interface = 'Ethernet0/1'
        static_routing.vrf = 'default'
        static_routing.af = 'ipv4'
        static_routing.route = '10.2.1.0/24'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nexthop = '192.168.1.2'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_preference = 2
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nh_vrf = 'VRF1'
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join(
            ['ip route 10.2.1.0/24 Ethernet0/1 192.168.1.2 vrf VRF1 2',
             ]))

    def test_static_routing_without_interface_cfg(self):
        # IPv4 route with only a next-hop (no interface); vrf_attr[None]
        # selects the default VRF.
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.af = 'ipv4'
        static_routing.route = '10.2.1.0/24'
        static_routing.device_attr[dev1].vrf_attr[None].address_family_attr[static_routing.af].route_attr[
            static_routing.route].next_hop_attr['192.168.1.2'].preference = 3
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertEqual(str(cfgs[dev1.name]), '\n'.join(
            ['ip route 10.2.1.0/24 192.168.1.2 3',
             ]))

    def test_static_routing_with_interface_next_vrf_cfg(self):
        # IPv4 route combining interface, next-hop, preference, tag, track,
        # and a next-hop VRF — checks the full option ordering in the CLI.
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.interface = 'Ethernet1/2'
        static_routing.vrf = 'default'
        static_routing.af = 'ipv4'
        static_routing.route = '1.1.1.1/32'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nexthop = '10.1.3.1'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_preference = 4
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_tag = 10
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_track = 1
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nh_vrf = 'VRF1'
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join(
            ['ip route 1.1.1.1/32 Ethernet1/2 10.1.3.1 vrf VRF1 track 1 tag 10 4',
             ]))

    def test_static_routing_ipv6_without_interface_cfg(self):
        # IPv6 route with next-hop only, default VRF.
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.af = 'ipv6'
        static_routing.route = '2001:2:2:2::2/128'
        static_routing.device_attr[dev1].vrf_attr['default'].address_family_attr[static_routing.af].route_attr[
            static_routing.route].next_hop_attr['2001:10:2:3::2'].preference = 3
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertEqual(str(cfgs[dev1.name]), '\n'.join(
            ['ipv6 route 2001:2:2:2::2/128 2001:10:2:3::2 3',
             ]))

    def test_static_routing_ipv6_with_interface_cfg(self):
        # IPv6 route with interface, next-hop, tag, track, and next-hop VRF
        # (no preference set — none expected in the output line).
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.interface = 'Ethernet1/4'
        static_routing.vrf = 'default'
        static_routing.af = 'ipv6'
        static_routing.route = '2001:2:2:2::2/128'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nexthop = '2001:10:2:3::2'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_tag = 10
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_track = 1
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nh_vrf = 'VRF1'
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join(
            ['ipv6 route 2001:2:2:2::2/128 Ethernet1/4 2001:10:2:3::2 vrf VRF1 track 1 tag 10',
             ]))

    def test_static_routing_ipv6_with_interface_vrf_cfg(self):
        # IPv6 route via interface only (Null0), inside a named VRF context;
        # interface_attr is accessed without setting extra attributes.
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.interface = 'Null0'
        static_routing.vrf = 'VRF1'
        static_routing.af = 'ipv6'
        static_routing.route = '2001:1:1:1::1/128'
        static_routing.device_attr[dev1].vrf_attr[static_routing.vrf].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface]
        self.assertIs(static_routing.testbed, testbed)
        dev1.add_feature(static_routing)
        cfgs = static_routing.build_config(apply=False)
        self.assertCountEqual(cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertMultiLineEqual(str(cfgs[dev1.name]), '\n'.join(
            ['vrf context VRF1',
             ' ipv6 route 2001:1:1:1::1/128 Null0',
             ' exit',
             ]))

    def test_static_routing_uncfg(self):
        # Unconfig of an IPv4 route in a named VRF: expects 'no ip route'
        # wrapped in the vrf context block.
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.af = 'ipv4'
        static_routing.interface = 'Ethernet0/1'
        static_routing.route = '10.2.1.0/24'
        static_routing.device_attr[dev1].vrf_attr['VRF1'].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nexthop = '192.168.2.2'
        dev1.add_feature(static_routing)
        un_cfgs = static_routing.build_unconfig(apply=False)
        self.assertCountEqual(un_cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertEqual(str(un_cfgs[dev1.name]), '\n'.join(
            ['vrf context VRF1',
             ' no ip route 10.2.1.0/24 Ethernet0/1 192.168.2.2',
             ' exit',
             ]))

    def test_static_routing_default_uncfg(self):
        # Unconfig of an IPv4 route in the default VRF: bare 'no ip route'.
        Genie.testbed = testbed = Testbed()
        dev1 = Device(testbed=testbed, name='PE1', os='nxos')
        static_routing = StaticRouting()
        static_routing.af = 'ipv4'
        static_routing.interface = 'Ethernet0/1'
        static_routing.route = '10.2.1.0/24'
        static_routing.device_attr[dev1].vrf_attr['default'].address_family_attr[static_routing.af].route_attr[
            static_routing.route].interface_attr[static_routing.interface].if_nexthop = '192.168.2.2'
        dev1.add_feature(static_routing)
        un_cfgs = static_routing.build_unconfig(apply=False)
        self.assertCountEqual(un_cfgs.keys(), [dev1.name])
        self.maxDiff = None
        self.assertEqual(str(un_cfgs[dev1.name]), '\n'.join(
            ['no ip route 10.2.1.0/24 Ethernet0/1 192.168.2.2',
             ]))
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()
| 42.716475
| 120
| 0.682752
| 1,464
| 11,149
| 4.939208
| 0.064208
| 0.303831
| 0.173973
| 0.063615
| 0.9433
| 0.928779
| 0.914258
| 0.896556
| 0.88895
| 0.88895
| 0
| 0.046216
| 0.19652
| 11,149
| 260
| 121
| 42.880769
| 0.760996
| 0.005113
| 0
| 0.792553
| 0
| 0.037234
| 0.089015
| 0
| 0
| 0
| 0
| 0
| 0.132979
| 1
| 0.042553
| false
| 0
| 0.031915
| 0
| 0.079787
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1d54d450b6db6456e39e05a582ac53f8c2906b77
| 120,412
|
py
|
Python
|
orca_predict.py
|
jzhoulab/orca
|
8ebfda95dec118ee4069b12d25a2a93f9ea7ec8f
|
[
"Apache-2.0"
] | 22
|
2021-05-10T19:42:00.000Z
|
2022-03-14T08:34:07.000Z
|
orca_predict.py
|
jzhoulab/orca
|
8ebfda95dec118ee4069b12d25a2a93f9ea7ec8f
|
[
"Apache-2.0"
] | null | null | null |
orca_predict.py
|
jzhoulab/orca
|
8ebfda95dec118ee4069b12d25a2a93f9ea7ec8f
|
[
"Apache-2.0"
] | 4
|
2021-05-28T16:35:35.000Z
|
2022-03-19T12:23:08.000Z
|
"""
This module provides functions for using Orca models for various
types of the predictions. This is the main module that you need for
interacting with Orca models.
To use any of the prediction functions, `load_resources` has to be
called first to load the necessary resources.
The coordinates used in Orca are 0-based, inclusive for the start
coordinate and exclusive for the end coordinate, consistent with
python conventions.
"""
import os
import pathlib
import numpy as np
import torch
from scipy.stats import spearmanr
from selene_utils2 import MemmapGenome, Genomic2DFeatures
import selene_sdk
from selene_sdk.sequences import Genome
from orca_models import H1esc, Hff, H1esc_1M, Hff_1M, H1esc_256M, Hff_256M
from orca_utils import (
genomeplot,
genomeplot_256Mb,
StructuralChange2,
process_anno,
coord_round,
coord_clip,
)
# Absolute path of the directory containing this module; resource files
# (genome FASTA, mcool datasets) are resolved relative to it.
ORCA_PATH = str(pathlib.Path(__file__).parent.absolute())
# Registries populated by `load_resources`: name -> loaded model / target
# Genomic2DFeatures object, used by the prediction functions below.
model_dict_global, target_dict_global = {}, {}
def load_resources(models=["32M"], use_cuda=True, use_memmapgenome=True):
    # NOTE: `models` has a mutable default, but it is only iterated with
    # `in`, never mutated, so the shared default list is safe here.
    """
    Load resources for Orca predictions including the specified
    Orca models and hg38 reference genome. It also creates Genomic2DFeatures
    objects for experimental micro-C datasets (for comparison with prediction).
    Loaded resources are accessible as global variables.
    The list of global variables generated is here:

    Global Variables
    ----------------
    hg38 : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        If `use_memmapgenome==True` and the resource file for hg38 mmap exists,
        use MemmapGenome instead of Genome.
    h1esc : orca_models.H1esc
        1-32Mb Orca H1-ESC model
    hff : orca_models.Hff
        1-32Mb Orca HFF model
    h1esc_256m : orca_models.H1esc_256M
        32-256Mb Orca H1-ESC model
    hff_256m : orca_models.Hff_256M
        32-256Mb Orca HFF model
    h1esc_1m : orca_models.H1esc_1M
        1Mb Orca H1-ESC model
    hff_1m : orca_models.Hff_1M
        1Mb Orca HFF model
    target_h1esc : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that load H1-ESC micro-C dataset 4DNFI9GMP2J8
        at 4kb resolution, used for comparison with 1-32Mb models.
    target_hff : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that load HFF micro-C dataset 4DNFI643OYP9
        at 4kb resolution, used for comparison with 1-32Mb models.
    target_h1esc_256m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that load H1-ESC micro-C dataset 4DNFI9GMP2J8
        at 32kb resolution, used for comparison with 32-256Mb models.
    target_hff_256m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that load HFF micro-C dataset 4DNFI643OYP9
        at 32kb resolution, used for comparison with 32-256Mb models.
    target_h1esc_1m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that load H1-ESC micro-C dataset 4DNFI9GMP2J8
        at 1kb resolution, used for comparison with 1Mb models.
    target_hff_1m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that load HFF micro-C dataset 4DNFI643OYP9
        at 1kb resolution, used for comparison with 1Mb models.
    target_available : bool
        Indicate whether the micro-C dataset resource file is available.

    Parameters
    ----------
    models : list(str)
        List of model types to load, supported model types includes
        "32M", "256M", "1M", corresponding to 1-32Mb, 32-256Mb, and 1Mb
        models. Lower cases are also accepted.
    use_cuda : bool, optional
        Default is True. If true, loaded models are moved to GPU.
    use_memmapgenome : bool, optional
        Default is True. If True and the resource file for hg38 mmap exists,
        use MemmapGenome instead of Genome.
    """
    global hg38, target_hff, target_h1esc, target_hff_256m, target_h1esc_256m, target_hff_1m, target_h1esc_1m, target_available
    # Each requested model family is instantiated in eval mode, placed on
    # GPU or CPU, and registered in model_dict_global for lookup by name.
    if "32M" in models or "32m" in models:
        global h1esc, hff
        h1esc = H1esc()
        h1esc.eval()
        hff = Hff()
        hff.eval()
        if use_cuda:
            h1esc.cuda()
            hff.cuda()
        else:
            h1esc.cpu()
            hff.cpu()
        model_dict_global["h1esc"] = h1esc
        model_dict_global["hff"] = hff
    if "1M" in models or "1m" in models:
        global h1esc_1m, hff_1m
        h1esc_1m = H1esc_1M()
        h1esc_1m.eval()
        hff_1m = Hff_1M()
        hff_1m.eval()
        if use_cuda:
            h1esc_1m.cuda()
            hff_1m.cuda()
        else:
            h1esc_1m.cpu()
            hff_1m.cpu()
        model_dict_global["h1esc_1m"] = h1esc_1m
        model_dict_global["hff_1m"] = hff_1m
    if "256M" in models or "256m" in models:
        global h1esc_256m, hff_256m
        h1esc_256m = H1esc_256M()
        h1esc_256m.eval()
        hff_256m = Hff_256M()
        hff_256m.eval()
        if use_cuda:
            h1esc_256m.cuda()
            hff_256m.cuda()
        else:
            h1esc_256m.cpu()
            hff_256m.cpu()
        model_dict_global["h1esc_256m"] = h1esc_256m
        model_dict_global["hff_256m"] = hff_256m
    # BUGFIX: the existence check previously tested the absolute path
    # "/resources/..." while the file actually loaded lives under ORCA_PATH,
    # so the mmap-backed genome was never selected. Check the same path
    # that is loaded below.
    if (
        use_memmapgenome
        and pathlib.Path(
            ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa.mmap"
        ).exists()
    ):
        hg38 = MemmapGenome(
            input_path=ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa",
            memmapfile=ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa.mmap",
        )
    else:
        hg38 = Genome(
            input_path=ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa",
        )
    # Observed micro-C targets (optional; only used for plotting/comparison).
    # `target_available` is set False when either cell type's mcool is missing.
    target_available = True
    if os.path.exists(ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool"):
        target_hff = Genomic2DFeatures(
            [ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool::/resolutions/4000"],
            ["r4000"],
            (8000, 8000),
            cg=True,
        )
        target_hff_256m = Genomic2DFeatures(
            [ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool::/resolutions/32000"],
            ["r32000"],
            (8000, 8000),
            cg=True,
        )
        target_hff_1m = Genomic2DFeatures(
            [ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool::/resolutions/1000"],
            ["r1000"],
            (8000, 8000),
            cg=True,
        )
        target_dict_global['hff'] = target_hff
        target_dict_global['hff_256m'] = target_hff_256m
        target_dict_global['hff_1m'] = target_hff_1m
    else:
        target_available = False
    if os.path.exists(ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool"):
        target_h1esc = Genomic2DFeatures(
            [ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool::/resolutions/4000"],
            ["r4000"],
            (8000, 8000),
            cg=True,
        )
        target_h1esc_256m = Genomic2DFeatures(
            [ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool::/resolutions/32000"],
            ["r32000"],
            (8000, 8000),
            cg=True,
        )
        target_h1esc_1m = Genomic2DFeatures(
            [ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool::/resolutions/1000"],
            ["r1000"],
            (8000, 8000),
            cg=True,
        )
        target_dict_global['h1esc'] = target_h1esc
        target_dict_global['h1esc_256m'] = target_h1esc_256m
        target_dict_global['h1esc_1m'] = target_h1esc_1m
    else:
        target_available = False
def genomepredict(
sequence, mchr, mpos=-1, wpos=-1, models=["h1esc", "hff"], targets=None, annotation=None, use_cuda=True, nan_thresh=1,
):
"""Multiscale prediction for a 32Mb sequence
input, zooming into the position specified when generating a series
of 32Mb, 16Mb, 8Mb, 4Mb, 2Mb and 1Mb predictions with increasing
resolutions (up to 4kb). This function also processes
information used only for plotting including targets and annotation.
For larger sequence and interchromosomal predictions, you can use
256Mb input with genomepredict_256Mb.
Parameters
----------
sequence : numpy.ndarray
One-hot sequence encoding of shape 1 x 4 x 32000000.
The encoding can be generated with `selene_sdk.Genome.sequence_to_encoding()`.
mchr : str
Chromosome name. This is used for annotation purpose only.
mpos : int, optional
The coordinate to zoom into for multiscale prediction.
wpos : int, optional
The coordinate of the center position of the sequence, which is
start position + 16000000.
models : list(torch.nn.Module or str), optional
Models to use. Default is H1-ESC and HFF Orca models.
targets : list(numpy.ndarray), optional
The observed balanced contact matrices from the
32Mb region. Used only for plotting when used with genomeplot. The length and
order of the list of targets should match the models specified (default is
H1-ESC and HFF Orca models).
The dimensions of the arrays should be 8000 x 8000 (1kb resolution).
annotation : str or None, optional
List of annotations for plotting. The annotation can be generated with
See orca_utils.process_anno and see its documentation for more details.
use_cuda : bool, optional
Default is True. If False, use CPU.
nan_thresh : int, optional
Default is 1. Specify the threshold of the proportion of NaNs values
allowed during downsampling for the observed matrices. Only relevant for plotting.
The lower resolution observed matrix value are computed by averaging multiple
bins into one. By default, we allow missing values and only average over the
non-missing values, and the values with more than the specified proprotion
of missing values will be filled with NaN.
Returns
----------
output : dict
Result dictionary that can be used as input for genomeplot. The dictionary
has the following keys:
- predictions : list(list(numpy.ndarray), list(numpy.ndarray))
Multi-level predictions for H1-ESC and HFF cell types.
- experiments : list(list(numpy.ndarray), list(numpy.ndarray))
Observations for H1-ESC and HFF cell types that matches the predictions.
Exists if `targets` is specified.
- normmats : list(list(numpy.ndarray), list(numpy.ndarray))
Background distance-based expected balanced contact matrices for
H1-ESC and HFF cell types that matches the predictions.
- start_coords : list(int)
Start coordinates for the prediction at each level.
- end_coords : list(int)
End coordinates for the prediction at each level.
- chr : str
The chromosome name.
- annos : list(list(...))
Annotation information. The format is as outputed by orca_utils.process_anno
Exists if `annotation` is specified.
"""
model_objs = []
for m in models:
if isinstance(m, torch.nn.Module):
model_objs.append(m)
else:
try:
if m in model_dict_global:
model_objs.append(model_dict_global[m])
except KeyError:
load_resources(models=["32M"], use_cuda=use_cuda)
if m in model_dict_global:
model_objs.append(model_dict_global[m])
models = model_objs
n_models = len(models)
with torch.no_grad():
allpreds = []
allstarts = []
if targets:
alltargets = []
if annotation is not None:
allannos = []
for iii, seq in enumerate(
[
torch.FloatTensor(sequence),
torch.FloatTensor(np.ascontiguousarray(sequence[:, ::-1, ::-1])),
]
):
for ii, model in enumerate(models):
if targets and iii == 0:
target = targets[ii]
(encoding1, encoding2, encoding4, encoding8, encoding16, encoding32,) = model.net(
model.net0(torch.Tensor(seq.float()).transpose(1, 2).cuda())
if use_cuda
else model.net0(torch.Tensor(seq.float()).transpose(1, 2))
)
encodings = {
1: encoding1,
2: encoding2,
4: encoding4,
8: encoding8,
16: encoding16,
32: encoding32,
}
def eval_step(level, start, coarse_pred=None):
distenc = torch.log(
torch.FloatTensor(model.normmats[level][None, None, :, :]).cuda()
if use_cuda
else torch.FloatTensor(model.normmats[level][None, None, :, :])
).expand(sequence.shape[0], 1, 250, 250)
if coarse_pred is not None:
if level == 1:
pred = model.denets[level].forward(
encodings[level][
:, :, int(start / level) : int(start / level) + 250
],
distenc,
coarse_pred,
) + model.denet_1_pt.forward(
encodings[level][
:, :, int(start / level) : int(start / level) + 250
]
)
else:
pred = model.denets[level].forward(
encodings[level][
:, :, int(start / level) : int(start / level) + 250
],
distenc,
coarse_pred,
)
else:
pred = model.denets[level].forward(
encodings[level][:, :, int(start / level) : int(start / level) + 250],
distenc,
)
return pred
preds = []
starts = [0]
if targets and iii == 0:
ts = []
if annotation is not None and iii == 0:
annos = []
for j, level in enumerate([32, 16, 8, 4, 2, 1]):
if j == 0:
pred = eval_step(level, starts[j])
else:
pred = eval_step(
level,
starts[j],
preds[j - 1][
:,
:,
start_index : start_index + 125,
start_index : start_index + 125,
],
)
if targets and iii == 0:
target_r = np.nanmean(
np.nanmean(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level,
starts[j] : starts[j] + 250 * level,
].numpy(),
(target.shape[0], 250, level, 250, level),
),
axis=4,
),
axis=2,
)
target_nan = np.mean(
np.mean(
np.isnan(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level,
starts[j] : starts[j] + 250 * level,
].numpy(),
(target.shape[0], 250, level, 250, level),
)
),
axis=4,
),
axis=2,
)
target_r[target_nan > nan_thresh] = np.nan
target_np = np.log(
(target_r + model.epss[level])
/ (model.normmats[level] + model.epss[level])
)[0, 0:, 0:]
ts.append(target_np)
if annotation is not None and iii == 0:
newstart = starts[j] / 8000.0
newend = (starts[j] + 250 * level) / 8000.0
anno_r = []
for r in annotation:
if len(r) == 3:
if not (r[0] >= newend or r[1] <= newstart):
anno_r.append(
(
np.fmax((r[0] - newstart) / (newend - newstart), 0,),
np.fmin((r[1] - newstart) / (newend - newstart), 1,),
r[2],
)
)
else:
if r[0] >= newstart and r[0] < newend:
anno_r.append(((r[0] - newstart) / (newend - newstart), r[1]))
annos.append(anno_r)
if iii == 0:
start_index = int(
np.clip(
np.floor(
(
(mpos - level * 1000000 / 4)
- (wpos - 16000000 + starts[j] * 4000)
)
/ (4000 * level)
),
0,
125,
)
)
else:
start_index = int(
np.clip(
np.ceil(
(
(wpos + 16000000 - starts[j] * 4000)
- (mpos + level * 1000000 / 4)
)
/ (4000 * level)
),
0,
125,
)
)
starts.append(starts[j] + start_index * level)
preds.append(pred)
allpreds.append(preds)
if iii == 0:
if targets:
alltargets.append(ts)
if annotation is not None:
allannos.append(annos)
allstarts.append(starts[:-1])
output = {}
output["predictions"] = [[] for _ in range(n_models)]
for i in range(n_models):
for j in range(len(allpreds[i])):
if allpreds[i][j].shape[1] == 1:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, 0, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, 0, ::-1, ::-1] * 0.5
)
else:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, :, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, :, ::-1, ::-1] * 0.5
)
if targets:
output["experiments"] = alltargets
else:
output["experiments"] = None
output["start_coords"] = [wpos - 16000000 + s * 4000 for s in allstarts[0]]
output["end_coords"] = [
int(output["start_coords"][ii] + 32000000 / 2 ** (ii)) for ii in range(6)
]
output["chr"] = mchr
if annotation is not None:
output["annos"] = allannos[0]
else:
output["annos"] = None
output["normmats"] = [
[model.normmats[ii] for ii in [32, 16, 8, 4, 2, 1]] for model in models
]
return output
def genomepredict_256Mb(
    sequence,
    mchr,
    normmats,
    chrlen,
    mpos=-1,
    wpos=-1,
    models=["h1esc_256m", "hff_256m"],
    targets=None,
    annotation=None,
    padding_chr=None,
    use_cuda=True,
    nan_thresh=1,
):
    """Multiscale prediction for a 256Mb sequence
    input, zooming into the position specified when generating a series
    of 256Mb, 128Mb, 64Mb, and 32Mb predictions with increasing
    resolutions (up to 128kb). This function also processes
    information used only for plotting including targets and annotation.

    This function accepts multichromosomal input sequence. Thus it needs an
    extra input `normmats` to encode the chromosomal information. See documentation
    for normmats argument for details.

    Parameters
    ----------
    sequence : numpy.ndarray
        One-hot sequence encoding of shape 1 x 4 x 256000000.
        The encoding can be generated with `selene_sdk.Genome.sequence_to_encoding()`.
    mchr : str
        The chromosome name of the first chromosome included in the sequence.
        This is used for annotation purpose only.
    normmats : list(numpy.ndarray)
        A list of distance-based background matrices for H1-ESC and HFF. The
        normmats contains arrays with dimensions 8000 x 8000 (32kb resolution).
        Interchromosomal interactions are filled with the expected balanced contact
        score for interchromosomal interactions.
    chrlen : int
        The coordinate of the end of the first chromosome in the input, which is the
        chromosome that will be zoomed into.
    mpos : int, optional
        Default is -1. The coordinate to zoom into for multiscale prediction. If neither
        `mpos` nor `wpos` are specified, it zooms into the center of the input by default.
    wpos : int, optional
        Default is -1. The coordinate of the center position of the sequence, which is
        start position + 16000000. If neither `mpos` nor `wpos` are specified, it zooms
        into the center of the input by default.
    models : list(torch.nn.Module or str), optional
        Models to use. Default is H1-ESC(256Mb) and HFF(256Mb) Orca models.
    targets : list(numpy.ndarray), optional
        The observed balanced contact matrices from the 256Mb sequence.
        Used only for plotting when used with genomeplot. The length and
        order of the list of targets should match the models specified (default is
        H1-ESC and HFF Orca models). The dimensions of the arrays should be
        8000 x 8000 (32kb resolution).
    annotation : str or None, optional
        Default is None. List of annotations for plotting. The annotation can be generated with
        See orca_utils.process_anno and see its documentation for more details.
    padding_chr : str, None, optional
        Default is None. Name of the padding chromosome after the first. Used for annotation
        only. TODO: be more flexible in the support for multiple chromosomes.
    use_cuda : bool, optional
        Default is True. If False, use CPU.
    nan_thresh : int, optional
        Default is 1. Specify the threshold of the proportion of NaNs values
        allowed during downsampling for the observed matrices. Only relevant for plotting.
        The lower resolution observed matrix value are computed by averaging multiple
        bins into one. By default, we allow missing values and only average over the
        non-missing values, and the values with more than the specified proportion
        of missing values will be filled with NaN.

    Returns
    ----------
    output : dict
        Result dictionary that can be used as input for genomeplot. The dictionary
        has the following keys:
        - predictions : list(list(numpy.ndarray), list(numpy.ndarray))
            Multi-level predictions for H1-ESC and HFF cell types.
        - experiments : list(list(numpy.ndarray), list(numpy.ndarray))
            Observations for H1-ESC and HFF cell types that matches the predictions.
            Exists if `targets` is specified.
        - normmats : list(list(numpy.ndarray), list(numpy.ndarray))
            Background distance-based expected balanced contact matrices for
            H1-ESC and HFF cell types that matches the predictions.
        - start_coords : list(int)
            Start coordinates for the prediction at each level.
        - end_coords : list(int)
            End coordinates for the prediction at each level.
        - chr : str
            The chromosome name.
        - annos : list(list(...))
            Annotation information. The format is as output by orca_utils.process_anno
            Exists if `annotation` is specified.
    """
    # Resolve model names to loaded model objects, loading resources on demand.
    model_objs = []
    for m in models:
        if isinstance(m, torch.nn.Module):
            model_objs.append(m)
        else:
            try:
                model_objs.append(model_dict_global[m])
            except KeyError:
                # Fix: the previous code wrapped a membership test
                # (`if m in model_dict_global`) in try/except KeyError, which
                # can never raise, so missing models were silently skipped and
                # load_resources was dead code. Index directly and load the
                # 256Mb resources on a genuine KeyError.
                load_resources(models=["256M"], use_cuda=use_cuda)
                model_objs.append(model_dict_global[m])
    models = model_objs
    # Fix: n_models was previously never assigned in this function, causing a
    # NameError when assembling the output dictionary below (the sibling
    # genomepredict defines it the same way).
    n_models = len(models)
    with torch.no_grad():
        allpreds = []
        allstarts = []
        allnormmats = []
        if targets:
            alltargets = []
        if annotation is not None:
            allannos = []
        # Run both the forward strand (iii == 0) and the reverse complement
        # (iii == 1); the two orientations are averaged in the output.
        for iii, seq in enumerate(
            [
                torch.FloatTensor(sequence),
                torch.FloatTensor(np.ascontiguousarray(sequence[:, ::-1, ::-1])),
            ]
        ):
            for ii, model in enumerate(models):
                normmat = normmats[ii]
                # Replace NaNs in the background matrix with its minimum
                # non-missing value so log/division below stay finite.
                normmat_nan = np.isnan(normmat)
                if np.any(normmat_nan):
                    normmat[normmat_nan] = np.nanmin(normmat[~normmat_nan])
                if targets and iii == 0:
                    target = targets[ii]
                (encoding32, encoding64, encoding128, encoding256) = model.net(
                    model.net1(
                        model.net0(
                            torch.Tensor(seq.float()).transpose(1, 2).cuda()
                            if use_cuda
                            else torch.Tensor(seq.float()).transpose(1, 2)
                        )
                    )[-1]
                )
                encodings = {
                    32: encoding32,
                    64: encoding64,
                    128: encoding128,
                    256: encoding256,
                }

                def eval_step(level, start, coarse_pred=None):
                    # Predict a 250 x 250 window at `level` Mb scale, starting
                    # at bin offset `start`, optionally conditioned on the
                    # coarser-level prediction.
                    distenc = torch.log(
                        torch.FloatTensor(ns[level][None, :, :]).cuda()
                        if use_cuda
                        else torch.FloatTensor(ns[level][None, :, :])
                    ).expand(sequence.shape[0], 1, 250, 250)
                    if coarse_pred is not None:
                        pred = model.denets[level].forward(
                            encodings[level][
                                :, :, int(start / (level // 8)) : int(start / (level // 8)) + 250,
                            ],
                            distenc if iii == 0 else torch.flip(distenc, [2, 3]),
                            coarse_pred,
                        )
                    else:
                        pred = model.denets[level].forward(
                            encodings[level][
                                :, :, int(start / (level // 8)) : int(start / (level // 8)) + 250,
                            ],
                            distenc if iii == 0 else torch.flip(distenc, [2, 3]),
                        )
                    return pred

                preds = []
                starts = [0]
                ns = {}
                if targets and iii == 0:
                    ts = []
                if annotation is not None and iii == 0:
                    annos = []
                for j, level in enumerate([256, 128, 64, 32]):
                    # Downsample the background matrix to 250 x 250 for this
                    # zoom window (bins of level // 8 x 32kb each).
                    normmat_r = np.nanmean(
                        np.nanmean(
                            np.reshape(
                                normmat[
                                    starts[j] : starts[j] + 250 * level // 8,
                                    starts[j] : starts[j] + 250 * level // 8,
                                ],
                                (1, 250, level // 8, 250, level // 8),
                            ),
                            axis=4,
                        ),
                        axis=2,
                    )
                    ns[level] = normmat_r
                    if j == 0:
                        pred = eval_step(level, starts[j])
                    else:
                        pred = eval_step(
                            level,
                            starts[j],
                            preds[j - 1][
                                :,
                                :,
                                start_index : start_index + 125,
                                start_index : start_index + 125,
                            ],
                        )
                    if targets and iii == 0:
                        # Downsample the observed matrix for plotting,
                        # averaging over non-missing values only.
                        target_r = np.nanmean(
                            np.nanmean(
                                np.reshape(
                                    target[
                                        :,
                                        starts[j] : starts[j] + 250 * level // 8,
                                        starts[j] : starts[j] + 250 * level // 8,
                                    ].numpy(),
                                    (target.shape[0], 250, level // 8, 250, level // 8),
                                ),
                                axis=4,
                            ),
                            axis=2,
                        )
                        # Proportion of missing values per downsampled bin;
                        # bins above nan_thresh are masked out.
                        target_nan = np.mean(
                            np.mean(
                                np.isnan(
                                    np.reshape(
                                        target[
                                            :,
                                            starts[j] : starts[j] + 250 * level // 8,
                                            starts[j] : starts[j] + 250 * level // 8,
                                        ].numpy(),
                                        (target.shape[0], 250, level // 8, 250, level // 8,),
                                    )
                                ),
                                axis=4,
                            ),
                            axis=2,
                        )
                        target_r[target_nan > nan_thresh] = np.nan
                        eps = np.nanmin(normmat_r)
                        target_np = np.log((target_r + eps) / (normmat_r + eps))[0, 0:, 0:]
                        ts.append(target_np)
                    if annotation is not None and iii == 0:
                        # Rescale annotation coordinates (in 32kb bins / 8000)
                        # into the [0, 1] range of the current zoom window.
                        newstart = starts[j] / 8000.0
                        newend = (starts[j] + 250 * level // 8) / 8000.0
                        anno_r = []
                        for r in annotation:
                            if len(r) == 3:
                                if not (r[0] >= newend or r[1] <= newstart):
                                    anno_r.append(
                                        (
                                            np.fmax((r[0] - newstart) / (newend - newstart), 0,),
                                            np.fmin((r[1] - newstart) / (newend - newstart), 1,),
                                            r[2],
                                        )
                                    )
                            else:
                                if r[0] >= newstart and r[0] < newend:
                                    anno_r.append(((r[0] - newstart) / (newend - newstart), r[1]))
                        annos.append(anno_r)
                    # Propose where the next (finer) zoom window should start
                    # so that mpos stays centered, in strand-specific coords.
                    if iii == 0:
                        proposed_start = (mpos - level * 1000000 / 4) - (
                            wpos - 128000000 + starts[j] * 4000 * 8
                        )
                    else:
                        proposed_start = (mpos - level * 1000000 / 4) - (
                            wpos + 128000000 - starts[j] * 4000 * 8 - level * 1000000
                        )
                    if chrlen is not None:
                        # Keep the zoom window within the first chromosome.
                        bounds = [
                            0 - (wpos - 128000000),
                            chrlen - level * 1000000 / 2 - (wpos - 128000000),
                        ]
                        if bounds[0] < bounds[1]:
                            proposed_start = np.clip(proposed_start, bounds[0], bounds[1])
                        else:
                            proposed_start = bounds[0]
                    start_index = int(np.clip(np.floor(proposed_start / (4000 * level)), 0, 125,))
                    if iii != 0:
                        # Mirror the offset for the reverse-complement pass.
                        start_index = 250 - (start_index + 125)
                    starts.append(starts[j] + start_index * level // 8)
                    preds.append(pred)
                allpreds.append(preds)
                allnormmats.append(ns)
                if iii == 0:
                    if targets:
                        alltargets.append(ts)
                    if annotation is not None:
                        allannos.append(annos)
                    allstarts.append(starts[:-1])
    output = {}
    # Average forward and reverse-complement predictions per model; the
    # reverse-complement prediction is flipped back before averaging.
    output["predictions"] = [[] for _ in range(n_models)]
    for i in range(n_models):
        for j in range(len(allpreds[i])):
            if allpreds[i][j].shape[1] == 1:
                output["predictions"][i].append(
                    allpreds[i][j].cpu().detach().numpy()[0, 0, :, :] * 0.5
                    + allpreds[i + n_models][j].cpu().detach().numpy()[0, 0, ::-1, ::-1] * 0.5
                )
            else:
                output["predictions"][i].append(
                    allpreds[i][j].cpu().detach().numpy()[0, :, :, :] * 0.5
                    + allpreds[i + n_models][j].cpu().detach().numpy()[0, :, ::-1, ::-1] * 0.5
                )
    if targets:
        output["experiments"] = alltargets
    else:
        output["experiments"] = None
    output["start_coords"] = [wpos - 128000000 + s * 32000 for s in allstarts[0]]
    # NOTE(review): np.fmin(..., chrlen) assumes chrlen is not None here, even
    # though chrlen is None-checked above — confirm callers always pass an int.
    output["end_coords"] = [
        np.fmin(int(output["start_coords"][ii] + 256000000 / 2 ** (ii)), chrlen) for ii in range(4)
    ]
    if annotation is not None:
        output["annos"] = allannos[0]
    else:
        output["annos"] = None
    output["chr"] = mchr
    output["padding_chr"] = padding_chr
    output["normmats"] = allnormmats
    return output
def _retrieve_multi(regionlist, genome, target=True, normmat=True, normmat_regionlist=None):
sequences = []
for region in regionlist:
if len(region) == 4:
chrom, start, end, strand = region
sequences.append(genome.get_encoding_from_coords(chrom, start, end, strand))
else:
chrom, start, end = region
sequences.append(genome.get_encoding_from_coords(chrom, start, end, "+"))
sequence = np.vstack(sequences)[None, :, :]
if isinstance(target, list):
target_objs = target
has_target = True
elif target and target_available:
target_objs = [target_h1esc_256m, target_hff_256m]
has_target = True
else:
has_target = False
if has_target:
targets = []
for target_obj in target_objs:
targets_ = []
for region in regionlist:
if len(region) == 4:
chrom, start, end, strand = region
else:
chrom, start, end = region
strand = "+"
t = []
for region2 in regionlist:
if len(region2) == 4:
chrom2, start2, end2, strand2 = region2
else:
chrom2, start2, end2 = region2
strand = "+"
t.append(
target_obj.get_feature_data(
chrom, start, end, chrom2=chrom2, start2=start2, end2=end2
)
)
if strand == "-":
t[-1] = t[-1][::-1, :]
if strand2 == "-":
t[-1] = t[-1][:, ::-1]
targets_.append(t)
targets_= np.vstack([np.hstack(l) for l in targets_])
targets.append(targets_)
targets = [
torch.FloatTensor(l[None, :, :]) for l in targets
]
if normmat:
if isinstance(normmat, list):
normmat_objs = normmat
else:
normmat_objs = [h1esc_256m, hff_256m]
if normmat_regionlist is None:
normmat_regionlist = regionlist
normmats = []
for normmat_obj in normmat_objs:
normmats_ = []
for chrom, start, end, strand in normmat_regionlist:
b = []
for chrom2, start2, end2, strand2 in normmat_regionlist:
if chrom2 != chrom:
b.append(
np.full(
(int((end - start) / 32000), int((end2 - start2) / 32000)),
normmat_obj.background_trans,
)
)
else:
binsize = 32000
acoor = np.linspace(start, end, int((end - start) / 32000) + 1)[:-1]
bcoor = np.linspace(start2, end2, int((end2 - start2) / 32000) + 1)[:-1]
b.append(
normmat_obj.background_cis[
(np.abs(acoor[:, None] - bcoor[None, :]) / binsize).astype(int)
]
)
if strand == "-":
b[-1] = b[-1][::-1, :]
if strand2 == "-":
b[-1] = b[-1][:, ::-1]
normmats_.append(b)
normmats_ = np.vstack([np.hstack(l) for l in normmats_])
normmats.append(normmats_)
datatuple = (sequence,)
if normmat:
datatuple = datatuple + (normmats,)
if has_target:
datatuple = datatuple + (targets,)
return datatuple
def process_region(
    mchr,
    mstart,
    mend,
    genome,
    file=None,
    custom_models=None,
    target=True,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    the specified region.

    Parameters
    ----------
    mchr : str
        The chromosome name of the region.
    mstart : int
        The start coordinate of the region.
    mend : int
        The end coordinate of the region.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref : dict
        Predictions zooming into the center of the specified region.
        The returned dictionary contains the prediction outputs and other
        retrieved information and can be directly used as input to genomeplot
        or genomeplot_256Mb. See documentation of `genomepredict` or
        `genomepredict_256Mb` for details of the dictionary content.
    """
    chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
    # Zoom into the midpoint of the queried region.
    mpos = int((int(mstart) + int(mend)) / 2)
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            # Target resources are not loaded; proceed without observations.
            target = False
    if window_radius == 16000000:
        wpos = coord_clip(mpos, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                )
                for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        chrlen_round = chrlen - chrlen % 32000
        wpos = 128000000
        # Fix: this previously tested the undefined name `has_target` (a local
        # of _retrieve_multi), raising NameError on the 256Mb path; process_dup
        # uses `if target:` here.
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
            targets = None
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Annotate the queried region (clipped to the prediction window) when it
    # fits within the window; otherwise skip annotation.
    # Fix: the condition previously read `mstart - mend < 2 * window_radius`,
    # which is always true for mstart < mend and made the else branch
    # unreachable.
    if mend - mstart < 2 * window_radius:
        anno_scaled = process_anno(
            [
                [
                    np.clip(mstart, wpos - window_radius, wpos + window_radius),
                    np.clip(mend, wpos - window_radius, wpos + window_radius),
                    "black",
                ]
            ],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    else:
        anno_scaled = None
    if window_radius == 128000000:
        outputs_ref = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mpos,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref = genomepredict(
            sequence, mchr, mpos, wpos, annotation=anno_scaled, models=models, targets=targets, use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref, show_coordinates=True, file=file + ".256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".pdf",
            )
    return outputs_ref
def process_dup(
    mchr,
    mstart,
    mend,
    genome,
    file=None,
    custom_models=None,
    target=True,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    a duplication variant.

    Parameters
    ----------
    mchr : str
        The chromosome name of the first segment
    mstart : int
        The start coordinate of the duplication.
    mend : int
        The end coordinate of the duplication.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
        Reference allele predictions zooming into the left boundary of the
        duplication,
        Reference allele predictions zooming into the right boundary of the
        duplication,
        Alternative allele predictions zooming into the duplication breakpoint.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
        details of the dictionary content.
    """
    chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            # Target resources are not loaded; proceed without observations.
            target = False
    # ref.l : reference allele, zooming into the left duplication boundary.
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        chrlen_round = chrlen - chrlen % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
            targets = None
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Annotate the duplication span, truncated at the right window edge.
    if wpos + window_radius > mend:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
        )
    else:
        anno_scaled = process_anno(
            [[mstart, wpos + window_radius, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 128000000:
        outputs_ref_l = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mstart,
            wpos,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            models=models,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref_l = genomepredict(
            sequence,
            mchr,
            mstart,
            wpos,
            annotation=anno_scaled,
            models=models,
            targets=targets,
            use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref_l,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.l.pdf",
            )
    # ref.r : reference allele, zooming into the right duplication boundary.
    # For the 256Mb models wpos stays 128000000, so the sequence, normmats and
    # targets retrieved for ref.l are reused below.
    if window_radius == 16000000:
        wpos = coord_clip(mend, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    # Annotate the duplication span, truncated at the left window edge.
    if wpos - window_radius < mstart:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_ref_r = genomepredict(
            sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot(
                outputs_ref_r,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.r.pdf",
            )
    else:
        outputs_ref_r = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mend,
            wpos,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            models=models,
            targets=targets,
            use_cuda=use_cuda,
        )
        # Fix: this plotting call previously ran unconditionally, raising
        # TypeError (None + str) when `file` is None; every other plotting
        # call in this function is guarded the same way.
        if file is not None:
            genomeplot_256Mb(
                outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
            )
    # alt (r) : alternative allele carrying the duplication, zooming into the
    # duplication breakpoint.
    s = StructuralChange2(mchr, chrlen)
    s.duplicate(mstart, mend)
    chrlen_alt = chrlen + mend - mstart
    if window_radius == 16000000:
        wpos = coord_clip(mend, chrlen_alt)
        sequence = []
        for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
            seq = genome.get_encoding_from_coords(chrm, start, end)
            if strand == "-":
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    else:
        chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
        if chrlen_alt_round < 256000000:
            wpos = 128000000
            (sequence, normmats) = _retrieve_multi(
                list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[
                    [mchr, 0, chrlen_alt_round, "+"],
                    [padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
                ],
            )
        else:
            wpos = coord_clip(mend, chrlen_alt_round, window_radius=128000000)
            (sequence, normmats) = _retrieve_multi(
                list(s[wpos - window_radius : wpos + window_radius]),
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
            )
    # Annotate the original copy (black) and the duplicated copy (gray),
    # truncating each at the window edges as needed.
    if wpos - window_radius < mstart and mend + mend - mstart < wpos + window_radius:
        anno_scaled = process_anno(
            [[mstart, mend, "black"], [mend, mend + mend - mstart, "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    elif wpos - window_radius >= mstart and mend + mend - mstart < wpos + window_radius:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "black"], [mend, mend + mend - mstart, "gray"],],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    elif wpos - window_radius < mstart and mend + mend - mstart >= wpos + window_radius:
        anno_scaled = process_anno(
            [[mstart, mend, "black"], [mend, wpos + window_radius, "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "black"], [mend, wpos + window_radius, "gray"],],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_alt = genomepredict(
            sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt, show_coordinates=True, file=file + ".alt.pdf")
    else:
        outputs_alt = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_alt_round,
            mend,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
            )
    return outputs_ref_l, outputs_ref_r, outputs_alt
def process_del(
    mchr,
    mstart,
    mend,
    genome,
    cmap=None,
    file=None,
    custom_models=None,
    target=True,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    a deletion variant.

    Parameters
    ----------
    mchr : str
        The chromosome name of the deleted segment.
    mstart : int
        The start coordinate of the deletion.
    mend : int
        The end coordinate of the deletion.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from.
    cmap : optional
        Default is None. Colormap forwarded to `genomeplot`
        (used in 32Mb mode only).
    file : str or None, optional
        Default is None. The output file prefix.
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
        Reference allele predictions zooming into the left boundary of the
        deletion,
        Reference allele predictions zooming into the right boundary of the
        deletion,
        Alternative allele predictions zooming into the deletion breakpoint.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot or genomeplot_256Mb. See documentation of
        `genomepredict` or `genomepredict_256Mb` for details of the
        dictionary content.
    """
    chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
    # Resolve which models to run: the defaults keyed by window size, or the
    # caller-provided custom models.
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    # Resolve experimental targets (for plotting only); if a named target is
    # not registered in target_dict_global, disable targets entirely.
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            target = False
    # ref.l : reference allele, window anchored at the left deletion boundary
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        # 256Mb mode: take the whole chromosome (rounded to 32kb) plus padding
        # from padding_chr to fill up 256Mb.
        chrlen_round = chrlen - chrlen % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
            # BUGFIX: `targets` must be bound on this path too; otherwise the
            # genomepredict_256Mb calls below raise NameError when `target` is
            # falsy (process_inv and process_ins already do this).
            targets = None
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Annotate the deleted interval, clipped to the prediction window.
    if wpos + window_radius > mend:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
        )
    else:
        anno_scaled = process_anno(
            [[mstart, wpos + window_radius, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 128000000:
        outputs_ref_l = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref_l = genomepredict(
            sequence,
            mchr,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            targets=targets,
            use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref_l,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                cmap=cmap,
                file=file + ".ref.l.pdf",
            )
    # ref.r : reference allele, window anchored at the right deletion boundary.
    # In 256Mb mode the whole-chromosome sequence/normmats/targets from ref.l
    # are reused; only the zoom position changes.
    if window_radius == 16000000:
        wpos = coord_clip(mend, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    if wpos - window_radius < mstart:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_ref_r = genomepredict(
            sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot(
                outputs_ref_r,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                cmap=cmap,
                file=file + ".ref.r.pdf",
            )
    else:
        outputs_ref_r = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mend,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
            )
    # alt : apply the deletion to the chromosome and predict around the
    # deletion breakpoint.
    s = StructuralChange2(mchr, chrlen)
    s.delete(mstart, mend)
    chrlen_alt = chrlen - (mend - mstart)
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen_alt)
        sequence = []
        for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
            seq = genome.get_encoding_from_coords(chrm, start, end)
            if strand == "-":
                # reverse-complement: flip both position and base axes
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    else:
        chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
        wpos = 128000000
        (sequence, normmats) = _retrieve_multi(
            list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
            genome,
            target=False,
            normmat=True,
            normmat_regionlist=[
                [mchr, 0, chrlen_alt_round, "+"],
                [padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
            ],
        )
    # Mark the breakpoint with a "double" annotation at mstart.
    anno_scaled = process_anno(
        [[mstart, "double"]], base=wpos - window_radius, window_radius=window_radius
    )
    if window_radius == 16000000:
        outputs_alt = genomepredict(
            sequence, mchr, mstart, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt, show_coordinates=True, cmap=cmap, file=file + ".alt.pdf")
    else:
        outputs_alt = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_alt_round,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
            )
    return outputs_ref_l, outputs_ref_r, outputs_alt
def process_inv(
    mchr,
    mstart,
    mend,
    genome,
    file=None,
    custom_models=None,
    target=True,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    an inversion variant.

    Parameters
    ----------
    mchr : str
        The chromosome name of the inverted segment.
    mstart : int
        The start coordinate of the inversion.
    mend : int
        The end coordinate of the inversion.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref_l, outputs_ref_r, outputs_alt_l, outputs_alt_r : dict, dict, dict, dict
        Reference allele predictions zooming into the left boundary of the
        inversion,
        Reference allele predictions zooming into the right boundary of the
        inversion,
        Alternative allele predictions zooming into the left boundary of
        the inversion,
        Alternative allele prediction zooming into the right boundary of
        the inversion.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
        details of the dictionary content.
    """
    chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
    # Resolve models: defaults keyed by window size, or caller-provided.
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    # Resolve experimental targets (plotting only); unknown names disable targets.
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            target = False
    # ref.l : reference allele, window anchored at the left inversion boundary
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        # 256Mb mode: whole chromosome (rounded to 32kb) plus padding_chr fill.
        chrlen_round = chrlen - chrlen % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
            targets = None
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Annotate the inverted interval, clipped to the prediction window.
    if wpos + window_radius > mend:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[mstart, wpos + window_radius, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 128000000:
        outputs_ref_l = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref_l = genomepredict(
            sequence,
            mchr,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            targets=targets,
            use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref_l,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.l.pdf",
            )
    # ref.r : reference allele, window anchored at the right inversion boundary.
    # In 256Mb mode the sequence/normmats/targets from ref.l are reused.
    if window_radius == 16000000:
        wpos = coord_clip(mend, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    if wpos - window_radius < mstart:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_ref_r = genomepredict(
            sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot(
                outputs_ref_r,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.r.pdf",
            )
    else:
        outputs_ref_r = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mend,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
            )
    # alt.l : apply the inversion and predict at the left boundary.
    # Note: an inversion preserves chromosome length, so chrlen/chrlen_round
    # are reused unchanged for the alternative allele.
    s = StructuralChange2(mchr, chrlen)
    s.invert(mstart, mend)
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen)
        sequence = []
        for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
            seq = genome.get_encoding_from_coords(chrm, start, end)
            if strand == "-":
                # reverse-complement: flip both position and base axes
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    else:
        wpos = 128000000
        # NOTE(review): this unpacking assumes _retrieve_multi returns a
        # 1-tuple (sequence,) when target=False and normmat=False — confirm
        # against _retrieve_multi's return contract.
        (sequence,) = _retrieve_multi(
            list(s[0:chrlen_round]) + [[padding_chr, 0, 256000000 - chrlen_round, "+"]],
            genome,
            target=False,
            normmat=False,
        )
        # normmats are not changed for inversion
    if mend < wpos + window_radius:
        anno_scaled = process_anno(
            [[mstart, mend, "gray"]], base=wpos - window_radius, window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[mstart, wpos + window_radius, "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_alt_l = genomepredict(
            sequence, mchr, mstart, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt_l, show_coordinates=True, file=file + ".alt.l.pdf")
    else:
        outputs_alt_l = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt_l, show_coordinates=True, file=file + ".alt.l.256m.pdf",
            )
    # alt.r : alternative allele, window anchored at the right boundary.
    # In 256Mb mode the alt sequence built for alt.l is reused.
    if window_radius == 16000000:
        wpos = coord_clip(mend, chrlen)
        sequence = []
        for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
            seq = genome.get_encoding_from_coords(chrm, start, end)
            if strand == "-":
                # reverse-complement: flip both position and base axes
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    if mstart > wpos - window_radius:
        anno_scaled = process_anno(
            [[mstart, mend, "gray"]], base=wpos - window_radius, window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_alt_r = genomepredict(
            sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt_r, show_coordinates=True, file=file + ".alt.r.pdf")
    else:
        outputs_alt_r = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mend,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt_r, show_coordinates=True, file=file + ".alt.r.256m.pdf",
            )
    return outputs_ref_l, outputs_ref_r, outputs_alt_l, outputs_alt_r
def process_ins(
    mchr,
    mpos,
    ins_seq,
    genome,
    strand="+",
    file=None,
    custom_models=None,
    target=True,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    an insertion variant that inserts the specified sequence
    to the insertion site.

    Parameters
    ----------
    mchr : str
        The chromosome name of the insertion site.
    mpos : int
        The insertion site coordinate.
    ins_seq : str
        The inserted sequence in string format.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from.
    strand : str, optional
        Default is "+". The orientation of the inserted sequence; segments
        on the "-" strand are reverse-complemented when the alternative
        sequence is assembled.
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref, outputs_alt_l, outputs_alt_r : dict, dict, dict
        Reference allele predictions zooming into the insertion site,
        Alternative allele predictions zooming into the left boundary of
        the insertion sequence,
        Alternative allele prediction zooming into the right boundary of
        the insertion sequence.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot or genomeplot_256Mb. See documentation of
        `genomepredict` or `genomepredict_256Mb` for details of the
        dictionary content.
    """
    chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
    # Resolve which models to run: defaults keyed by window size, or the
    # caller-provided custom models.
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    # Resolve experimental targets (plotting only); unknown names disable targets.
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            target = False
    # ref : reference allele, window anchored at the insertion site
    if window_radius == 16000000:
        wpos = coord_clip(mpos, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        # normalize chromosome name to the "chr"-prefixed form
                        # used by the target datasets
                        "chr" + mchr.replace("chr", ""),
                        coord_round(wpos - window_radius),
                        coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        # 256Mb mode: whole chromosome (rounded to 32kb) plus padding_chr fill.
        chrlen_round = chrlen - chrlen % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
            targets = None
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Mark the insertion site with a "single" annotation.
    anno_scaled = process_anno(
        [[mpos, "single"]], base=wpos - window_radius, window_radius=window_radius
    )
    if window_radius == 128000000:
        # BUGFIX: store under `outputs_ref` (was `outputs_ref_l`), which is the
        # name returned at the end of the function; the old name made the
        # return statement raise NameError in 256Mb mode.
        outputs_ref = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mpos,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref = genomepredict(
            sequence, mchr, mpos, wpos, annotation=anno_scaled, models=models, targets=targets, use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref, show_coordinates=True, file=file + ".ref.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.pdf",
            )
    # alt : apply the insertion; the alternative chromosome is longer by
    # len(ins_seq).
    s = StructuralChange2(mchr, chrlen)
    s.insert(mpos, len(ins_seq), strand=strand)
    chrlen_alt = chrlen + len(ins_seq)
    # alt.l : window anchored at the left boundary of the inserted sequence
    if window_radius == 16000000:
        wpos = coord_clip(mpos, chrlen_alt)
        sequence = []
        # use a distinct loop variable so the `strand` parameter is not clobbered
        for chr_name, start, end, seg_strand in s[wpos - window_radius : wpos + window_radius]:
            if chr_name.startswith("ins"):
                # segment from the inserted sequence itself
                seq = Genome.sequence_to_encoding(ins_seq[start:end])
            else:
                seq = genome.get_encoding_from_coords(chr_name, start, end)
            if seg_strand == "-":
                # reverse-complement: flip both position and base axes
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    else:
        chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
        if chrlen_alt_round < 256000000:
            # chromosome still fits in 256Mb: pad with padding_chr
            wpos = 128000000
            (sequence, normmats) = _retrieve_multi(
                list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[
                    [mchr, 0, chrlen_alt_round, "+"],
                    [padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
                ],
            )
        else:
            # insertion pushed the chromosome past 256Mb: take a 256Mb window
            wpos = coord_clip(mpos, chrlen_alt_round, window_radius=128000000)
            (sequence, normmats) = _retrieve_multi(
                list(s[wpos - window_radius : wpos + window_radius]),
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
            )
    # Annotate the inserted interval, clipped to the window on the right.
    if mpos + len(ins_seq) < wpos + window_radius:
        anno_scaled = process_anno(
            [[mpos, mpos + len(ins_seq), "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[mpos, wpos + window_radius, "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_alt_l = genomepredict(
            sequence, mchr, mpos, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt_l, show_coordinates=True, file=file + ".alt.l.pdf")
    else:
        outputs_alt_l = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_alt_round,
            mpos,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt_l, show_coordinates=True, file=file + ".alt.l.256m.pdf",
            )
    # alt.r : window anchored at the right boundary of the inserted sequence
    if window_radius == 16000000:
        wpos = coord_clip(mpos + len(ins_seq), chrlen_alt)
        sequence = []
        for chr_name, start, end, seg_strand in s[wpos - window_radius : wpos + window_radius]:
            if chr_name.startswith("ins"):
                seq = Genome.sequence_to_encoding(ins_seq[start:end])
            else:
                seq = genome.get_encoding_from_coords(chr_name, start, end)
            if seg_strand == "-":
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    else:
        if chrlen_alt_round > 256000000:
            # re-center the 256Mb window on the right insertion boundary
            wpos = coord_clip(mpos + len(ins_seq), chrlen_alt_round, window_radius=128000000)
            (sequence, normmats) = _retrieve_multi(
                list(s[wpos - window_radius : wpos + window_radius]),
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
            )
    # Annotate the inserted interval, clipped to the window on the left.
    # BUGFIX: the clip test compares against the left window edge
    # (wpos - window_radius); the original used wpos + window_radius, which is
    # never exceeded by mpos, so the annotation start was always clamped
    # (cf. the parallel logic in process_inv).
    if mpos > wpos - window_radius:
        anno_scaled = process_anno(
            [[mpos, mpos + len(ins_seq), "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mpos + len(ins_seq), "gray"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        # BUGFIX: pass models=models (was omitted), so custom_models is
        # honored here as in every other genomepredict call.
        outputs_alt_r = genomepredict(
            sequence, mchr, mpos + len(ins_seq), wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt_r, show_coordinates=True, file=file + ".alt.r.pdf")
    else:
        # BUGFIX: store under `outputs_alt_r` (was `outputs_alt`), the name
        # returned below; the old name made the return raise NameError in
        # 256Mb mode.
        outputs_alt_r = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_alt_round,
            mpos + len(ins_seq),
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt_r, show_coordinates=True, file=file + ".alt.r.256m.pdf",
            )
    return outputs_ref, outputs_alt_l, outputs_alt_r
def process_custom(
    region_list,
    ref_region_list,
    mpos,
    genome,
    ref_mpos_list=None,
    anno_list=None,
    ref_anno_list=None,
    custom_models=None,
    target=True,
    file=None,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    a custom variant by an ordered list of genomic segments.

    Parameters
    ----------
    region_list : list(list(...))
        List of segments to complete the alternative. Each segment is specified
        by a list( chr: str, start: int, end: int, strand: str), and segments
        are concatenated together in the given order. The total length
        should sum up to 32Mb. An example input is
        [['chr5', 89411065, 89411065+16000000, '-'], ['chr7', 94378248, 94378248+16000000,'+']].
    ref_region_list : list(list(...))
        The reference regions to predict. This can be any reference regions with
        the length of the specified window size. Each reference region is specified
        with a list( chr: str, start: int, end: int, strand: str). The strand must
        be '+'. The intended use is predicting the genome interactions for each
        segment that constitute the alternative allele within the native
        reference sequence context. An example
        input is [['chr5', 89411065-16000000, 89411065+16000000,'+'],
        ['chr7', 94378248-16000000, 94378248+16000000,'+']].
    mpos : int
        The position to zoom into in the alternative allele. Note that `mpos`
        here specify the relative position with respect to the to start of the 32Mb.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from.
    ref_mpos_list : list(int) or None, optional
        Default is None. List of positions to zoom into for each of the
        reference regions specified in `ref_region_list`. If not specified,
        then zoom into the center of each region. Note that `ref_mpos_list`
        specifies the relative positions with respect to start of the 32Mb.
        For example, `16000000` means the center of the sequence.
    anno_list : list or None, optional
        Default is None. Annotation entries forwarded to `process_anno`
        (with base=0) for the alternative allele plot.
        NOTE(review): no None-check is performed before the `process_anno`
        call — confirm `process_anno` accepts None.
    ref_anno_list : list or None, optional
        Default is None. Annotation entries forwarded to `process_anno`
        (with base=0) for each reference region plot. Same None-handling
        caveat as `anno_list`.
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. Currently only 16000000 (32Mb window) is accepted.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref, outputs_alt : dict, dict
        Reference allele predictions for the LAST region in `ref_region_list`
        (predictions for every reference region are written to file when
        `file` is specified, but only the last one is returned),
        Alternative allele predictions for the assembled custom sequence.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot. See documentation of `genomepredict` for
        details of the dictionary content.
    """
    # Resolve models: defaults keyed by window size, or caller-provided.
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    # Resolve experimental targets (plotting only); unknown names disable targets.
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            target = False
    def validate_region_list(region_list, enforce_strand=None):
        # Check each segment lies within its chromosome and that the segments
        # sum to exactly one full window (2 * window_radius).
        # NOTE(review): these asserts are stripped under `python -O`.
        sumlen = 0
        for chrm, start, end, strand in region_list:
            chrlen = [l for c, l in genome.get_chr_lens() if c == chrm].pop()
            assert start >= 0 and end <= chrlen
            sumlen += end - start
            if enforce_strand:
                if strand != enforce_strand:
                    raise ValueError("The specified strand must be " + enforce_strand)
        assert sumlen == 2 * window_radius
    validate_region_list(region_list)
    # Predict each reference region in its native context.
    # NOTE: outputs_ref is overwritten on every iteration; only the last
    # reference region's predictions are returned (each one is plotted to
    # its own file when `file` is given).
    for i, ref_region in enumerate(ref_region_list):
        validate_region_list([ref_region], enforce_strand="+")
        ref_sequence = genome.get_encoding_from_coords(*ref_region)[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        ref_region[0], coord_round(ref_region[1]), coord_round(ref_region[2]),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
        anno_scaled = process_anno(ref_anno_list, base=0, window_radius=window_radius)
        outputs_ref = genomepredict(
            ref_sequence,
            ref_region[0],
            # default zoom is the window center unless ref_mpos_list overrides
            ref_region[1] + window_radius if ref_mpos_list is None else ref_mpos_list[i],
            ref_region[1] + window_radius,
            annotation=anno_scaled,
            models=models,
            targets=targets,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot(
                outputs_ref,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref." + str(i) + ".pdf",
            )
    # Assemble the alternative sequence by concatenating the segments,
    # reverse-complementing any "-" strand segment.
    sequence = []
    for chrm, start, end, strand in region_list:
        seq = genome.get_encoding_from_coords(chrm, start, end)
        if strand == "-":
            # reverse-complement: flip both position and base axes
            seq = seq[None, ::-1, ::-1].copy()
        else:
            seq = seq[None, :, :]
        sequence.append(seq)
    alt_sequence = np.concatenate(sequence, axis=1)
    anno_scaled = process_anno(anno_list, base=0, window_radius=window_radius)
    outputs_alt = genomepredict(
        alt_sequence, "chimeric", mpos, window_radius, models=models, annotation=anno_scaled, use_cuda=use_cuda,
    )
    if file is not None:
        genomeplot(outputs_alt, show_coordinates=False, file=file + ".alt.pdf")
    return outputs_ref, outputs_alt
def process_single_breakpoint(
    chr1,
    pos1,
    chr2,
    pos2,
    orientation1,
    orientation2,
    genome,
    custom_models=None,
    target=True,
    file=None,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    a simple translocation event that connects
    two chromosomal breakpoints. Specifically, two breakpoint
    positions and the corresponding two orientations are needed.
    The orientations decide how the breakpoints are connected.
    The '+' or '-' sign indicate whether the left or right side of
    the breakpoint is used. For example, for an input
    ('chr1', 85691449, 'chr5', 89533745 '+', '+'), two plus signs
    indicate connecting chr1:0-85691449 with chr5:0-89533745.

    Parameters
    ----------
    chr1 : str
        The chromosome name of the first segment
    pos1 : int
        The coordinate of the breakpoint on the first segment
    chr2 : str
        The chromosome name of the second segment
    pos2 : int
        The coordinate of the breakpoint on the second segment
    orientation1 : str
        Indicate which side of the breakpoint should be used for
        the first segment,
        '+' indicate the left and '-' indicate the right side.
    orientation2 : str
        Indicate which side of the breakpoint should be used for
        the second segment,
        '+' indicate the left and '-' indicate the right side.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref_1, outputs_ref_2, outputs_alt : dict, dict, dict
        Reference allele predictions zooming into the chr1 breakpoint,
        Reference allele predictions zooming into the chr2 breakpoint,
        Alternative allele prediction zooming into the junction.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
        details of the dictionary content.
    """
    # Resolve model identifiers: either the caller's custom models or the
    # default Orca model names keyed by window size.
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    # Resolve experimental targets. `target == True` picks defaults matching
    # the window size; string entries are looked up in target_dict_global.
    # A missing key silently disables targets (best-effort: plotting only).
    if target:
        try:
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            target = False
    chrlen1 = [l for c, l in genome.get_chr_lens() if c == chr1].pop()
    # --- Reference prediction 1: window around the chr1 breakpoint ---
    if window_radius == 16000000:
        # Clip the window center so the 32Mb window stays inside the chromosome.
        wpos = coord_clip(pos1, chrlen1)
        sequence = genome.get_encoding_from_coords(
            chr1, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        chr1, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        # 256Mb mode: use the whole chromosome (rounded down to a 32kb
        # multiple) and pad with sequence from padding_chr up to 256Mb.
        chrlen1_round = chrlen1 - chrlen1 % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[chr1, 0, chrlen1_round, "+"], [padding_chr, 0, 256000000 - chrlen1_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[chr1, 0, chrlen1_round, "+"], [padding_chr, 0, 256000000 - chrlen1_round, "+"]],
                genome,
                target=target,
            )
            targets = None
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Annotation marks the breakpoint position relative to the window start.
    anno_scaled = process_anno(
        [[pos1, "single"]], base=wpos - window_radius, window_radius=window_radius
    )
    if window_radius == 128000000:
        outputs_ref_1 = genomepredict_256Mb(
            sequence,
            chr1,
            normmats,
            chrlen1_round,
            pos1,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref_1 = genomepredict(
            sequence, chr1, pos1, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref_1, show_coordinates=True, file=file + ".ref.1.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref_1,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.1.pdf",
                colorbar=True,
            )
    chrlen2 = [l for c, l in genome.get_chr_lens() if c == chr2].pop()
    # --- Reference prediction 2: window around the chr2 breakpoint ---
    # (mirrors the chr1 block above)
    if window_radius == 16000000:
        wpos = coord_clip(pos2, chrlen2)
        sequence = genome.get_encoding_from_coords(
            chr2, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        chr2, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        chrlen2_round = chrlen2 - chrlen2 % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[chr2, 0, chrlen2_round, "+"], [padding_chr, 0, 256000000 - chrlen2_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[chr2, 0, chrlen2_round, "+"], [padding_chr, 0, 256000000 - chrlen2_round, "+"]],
                genome,
                target=target,
            )
            targets = None
    anno_scaled = process_anno(
        [[pos2, "single"]], base=wpos - window_radius, window_radius=window_radius
    )
    if window_radius == 128000000:
        outputs_ref_2 = genomepredict_256Mb(
            sequence,
            chr2,
            normmats,
            chrlen2_round,
            pos2,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref_2 = genomepredict(
            sequence, chr2, pos2, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref_2, show_coordinates=True, file=file + ".ref.2.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref_2,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                file=file + ".ref.2.pdf",
                colorbar=True,
            )
    # --- Build the chimeric (alternative) allele with StructuralChange2 ---
    # Segment 1: '+' keeps the left side of pos1; '-' keeps the right side
    # and inverts it so the breakpoint end faces the junction.
    chrlen = [l for c, l in genome.get_chr_lens() if c == chr1].pop()
    s = StructuralChange2(chr1, chrlen)
    if orientation1 == "+":
        s.delete(pos1, chrlen)
    else:
        s.delete(0, pos1 - 1)
        s.invert(0, chrlen - pos1 + 1)
    # Segment 2: '-' keeps the right side of pos2; '+' keeps the left side
    # and inverts it so it can be appended after segment 1.
    chrlen = [l for c, l in genome.get_chr_lens() if c == chr2].pop()
    s2 = StructuralChange2(chr2, chrlen)
    if orientation2 == "-":
        s2.delete(0, pos2 - 1)
    else:
        s2.delete(pos2, chrlen)
        s2.invert(0, pos2)
    # Junction coordinate in chimeric coordinates = length of kept segment 1.
    breakpos = s.coord_points[-1]
    s = s + s2
    if window_radius == 16000000:
        wpos = coord_clip(breakpos, s.coord_points[-1])
        sequence = []
        curpos = 0
        anno = []
        # Stitch the encoded sequence for the window, reverse-complementing
        # '-' strand pieces, and record each piece's span for annotation.
        for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
            seq = genome.get_encoding_from_coords(chrm, start, end)
            if strand == "-":
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
            anno.append([curpos, curpos + end - start])
            curpos = curpos + end - start
        sequence = np.concatenate(sequence, axis=1)
    else:
        chrlen_alt_round = s.coord_points[-1] - s.coord_points[-1] % 32000
        if chrlen_alt_round < 256000000:
            # Chimeric chromosome fits in 256Mb: take all of it plus padding.
            wpos = 128000000
            (sequence, normmats) = _retrieve_multi(
                list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[
                    [chr1 + "|" + chr2, 0, chrlen_alt_round, "+"],
                    [padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
                ],
            )
            curpos = 0
            anno = []
            for chrm, start, end, strand in s[0:chrlen_alt_round]:
                anno.append([curpos, curpos + end - start])
                curpos = curpos + end - start
        else:
            # Chimeric chromosome exceeds 256Mb: center a 256Mb window on the
            # junction (clipped to the rounded chimeric length).
            wpos = coord_clip(breakpos, chrlen_alt_round, window_radius=128000000)
            (sequence, normmats) = _retrieve_multi(
                list(s[wpos - window_radius : wpos + window_radius]),
                genome,
                target=False,
                normmat=True,
                normmat_regionlist=[
                    [chr1 + "|" + chr2, wpos - window_radius, wpos + window_radius, "+"]
                ],
            )
            curpos = 0
            anno = []
            for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
                anno.append([curpos, curpos + end - start])
                curpos = curpos + end - start
    # anno[0][-1] is the end of the first stitched piece, i.e. the junction
    # position within the window (presumably; verify against process_anno).
    anno_scaled = process_anno([[anno[0][-1], "double"]], base=0, window_radius=window_radius)
    if window_radius == 16000000:
        outputs_alt = genomepredict(
            sequence, chr1 + "|" + chr2, breakpos, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt, show_coordinates=False, file=file + ".alt.pdf", colorbar=True)
    else:
        outputs_alt = genomepredict_256Mb(
            sequence,
            chr1 + "|" + chr2,
            normmats,
            chrlen_alt_round,
            breakpos,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
            )
    return outputs_ref_1, outputs_ref_2, outputs_alt
if __name__ == "__main__":
    from docopt import docopt
    import sys
    import os

    # docopt usage text: one sub-command per variant type.
    doc = """
Orca multiscale genome interaction sequence model prediction tool.

Usage:
  orca_predict region [options] <coordinate> <output_dir>
  orca_predict del [options] <coordinate> <output_dir>
  orca_predict dup [options] <coordinate> <output_dir>
  orca_predict inv [options] <coordinate> <output_dir>
  orca_predict break [options] <coordinate> <output_dir>

Options:
  -h --help        Show this screen.
  --show_genes     Show gene annotation (only supported for 32Mb models).
  --show_tracks    Show chromatin tracks (only supported for 32Mb models).
  --256m           Use 256Mb models (default is 32Mb).
  --nocuda         Use CPU implementation.
  --version        Show version.
"""
    if len(sys.argv) == 1:
        # With no arguments, show the help text instead of a docopt error.
        sys.argv.append("-h")
    arguments = docopt(doc, version="Orca v0.1")
    show_genes = arguments["--show_genes"]
    show_tracks = arguments["--show_tracks"]
    window_radius = 128000000 if arguments["--256m"] else 16000000
    use_cuda = not arguments["--nocuda"]

    # BUGFIX: the 32Mb models were loaded unconditionally, so --256m ran with
    # the wrong resources. Load the model set matching the requested window
    # size instead (NOTE(review): assumes load_resources accepts "256M" —
    # confirm against its definition).
    load_resources(models=["32M"] if window_radius == 16000000 else ["256M"], use_cuda=use_cuda)

    if arguments["region"]:
        predtype = "region"
    elif arguments["del"]:
        predtype = "del"
    elif arguments["dup"]:
        predtype = "dup"
    elif arguments["inv"]:
        predtype = "inv"
    elif arguments["break"]:
        predtype = "break"
    else:
        # docopt should guarantee one sub-command; fail loudly if not.
        raise ValueError("Unexpected prediction type!")

    def predict(chrm, start, end, savedir):
        """Run a reference-region prediction and save outputs to `savedir`."""
        if not os.path.exists(savedir):
            os.makedirs(savedir)
        with torch.no_grad():
            outputs = process_region(
                chrm,
                start,
                end,
                hg38,
                target=target_available,
                file=savedir + "/orca_pred",
                show_genes=show_genes,
                show_tracks=show_tracks,
                window_radius=window_radius,
                padding_chr="chr1",
                use_cuda=use_cuda,
            )
        torch.save(outputs, savedir + "/orca_pred.pth")
        return None

    def get_interactions(predtype, content, savedir):
        """Parse `content` for the given prediction type, run the matching
        process_* function, and save the prediction dictionaries.

        `content` formats: "chrN:start-end" for region/dup/del/inv;
        "chrA:pos chrB:pos o1/o2" for break.
        """
        # NOTE(review): pdf_names lists the files each branch produces but is
        # never consumed in this script — possibly kept for a downstream
        # report; verify before removing.
        if predtype == "region":
            pdf_names = ["orca_pred.pdf"]
            if show_genes or show_tracks:
                pdf_names += ["orca_pred.anno.pdf"]
            chrstr, coordstr = str(content).split(":")
            chrstr = "chr" + chrstr.replace("chr", "")
            coord_s, coord_e = coordstr.split("-")
            predict(chrstr, int(coord_s), int(coord_e), savedir)
        elif predtype in ["dup", "del"]:
            pdf_names = ["orca_pred.ref.l.pdf", "orca_pred.ref.r.pdf", "orca_pred.alt.pdf"]
            if show_genes or show_tracks:
                pdf_names += [
                    "orca_pred.ref.l.anno.pdf",
                    "orca_pred.ref.r.anno.pdf",
                    "orca_pred.alt.anno.pdf",
                ]
            chrstr, coordstr = str(content).split(":")
            chrstr = "chr" + chrstr.replace("chr", "")
            coord_s, coord_e = coordstr.split("-")
            if not os.path.exists(savedir):
                os.makedirs(savedir)
            process_sv = process_dup if predtype == "dup" else process_del
            outputs_ref_l, outputs_ref_r, outputs_alt = process_sv(
                chrstr,
                int(coord_s),
                int(coord_e),
                hg38,
                target=target_available,
                show_genes=show_genes,
                show_tracks=show_tracks,
                file=savedir + "/orca_pred",
                window_radius=window_radius,
                use_cuda=use_cuda,
            )
            torch.save(
                {
                    "outputs_ref_l": outputs_ref_l,
                    "outputs_ref_r": outputs_ref_r,
                    "outputs_alt": outputs_alt,
                },
                savedir + "/orca_pred.pth",
            )
        elif predtype == "inv":
            pdf_names = [
                "orca_pred.ref.l.pdf",
                "orca_pred.ref.r.pdf",
                "orca_pred.alt.l.pdf",
                "orca_pred.alt.r.pdf",
            ]
            if show_genes or show_tracks:
                pdf_names += [
                    "orca_pred.ref.l.anno.pdf",
                    "orca_pred.ref.r.anno.pdf",
                    "orca_pred.alt.l.anno.pdf",
                    "orca_pred.alt.r.anno.pdf",
                ]
            chrstr, coordstr = str(content).split(":")
            chrstr = "chr" + chrstr.replace("chr", "")
            coord_s, coord_e = coordstr.split("-")
            if not os.path.exists(savedir):
                os.makedirs(savedir)
            outputs_ref_l, outputs_ref_r, outputs_alt_l, outputs_alt_r = process_inv(
                chrstr,
                int(coord_s),
                int(coord_e),
                hg38,
                target=target_available,
                show_genes=show_genes,
                show_tracks=show_tracks,
                file=savedir + "/orca_pred",
                window_radius=window_radius,
                use_cuda=use_cuda,
            )
            torch.save(
                {
                    "outputs_ref_l": outputs_ref_l,
                    "outputs_ref_r": outputs_ref_r,
                    "outputs_alt_l": outputs_alt_l,
                    "outputs_alt_r": outputs_alt_r,
                },
                savedir + "/orca_pred.pth",
            )
        elif predtype == "break":
            pdf_names = ["orca_pred.ref.1.pdf", "orca_pred.ref.2.pdf", "orca_pred.alt.pdf"]
            if show_genes or show_tracks:
                pdf_names += [
                    "orca_pred.ref.1.anno.pdf",
                    "orca_pred.ref.2.anno.pdf",
                    "orca_pred.alt.anno.pdf",
                ]
            # Accept tab- or space-separated "chrA:pos chrB:pos o1/o2".
            chr_coord_1, chr_coord_2, orientations = str(content.replace("\t", " ")).split(" ")
            chr1, coord1 = chr_coord_1.split(":")
            chr2, coord2 = chr_coord_2.split(":")
            chr1 = "chr" + chr1.replace("chr", "")
            chr2 = "chr" + chr2.replace("chr", "")
            orientation1, orientation2 = orientations.split("/")
            if not os.path.exists(savedir):
                os.makedirs(savedir)
            outputs_ref_1, outputs_ref_2, outputs_alt = process_single_breakpoint(
                chr1,
                int(coord1),
                chr2,
                int(coord2),
                orientation1,
                orientation2,
                hg38,
                target=target_available,
                show_genes=show_genes,
                show_tracks=show_tracks,
                file=savedir + "/orca_pred",
                window_radius=window_radius,
                use_cuda=use_cuda,
            )
            torch.save(
                {
                    "outputs_ref_1": outputs_ref_1,
                    "outputs_ref_2": outputs_ref_2,
                    "outputs_alt": outputs_alt,
                },
                savedir + "/orca_pred.pth",
            )
        else:
            raise ValueError("Unexpected prediction type!")
        return None

    get_interactions(predtype, arguments["<coordinate>"], arguments["<output_dir>"])
| 37.984858
| 127
| 0.537529
| 12,769
| 120,412
| 4.909703
| 0.048477
| 0.062783
| 0.034454
| 0.025266
| 0.83773
| 0.817042
| 0.793594
| 0.779063
| 0.757146
| 0.746858
| 0
| 0.051659
| 0.37865
| 120,412
| 3,169
| 128
| 37.996844
| 0.786268
| 0.243506
| 0
| 0.708457
| 0
| 0
| 0.05444
| 0.009947
| 0
| 0
| 0
| 0.000316
| 0.00085
| 1
| 0.0068
| false
| 0
| 0.005525
| 0
| 0.018275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1d6bd75869d508a91649421475bef40f335b890f
| 9,193
|
py
|
Python
|
app_simulator/tests/test.py
|
nicetester/newmonkey_tab
|
e23e310c93163eb98a8573248534eceea0fb1141
|
[
"MIT"
] | null | null | null |
app_simulator/tests/test.py
|
nicetester/newmonkey_tab
|
e23e310c93163eb98a8573248534eceea0fb1141
|
[
"MIT"
] | null | null | null |
app_simulator/tests/test.py
|
nicetester/newmonkey_tab
|
e23e310c93163eb98a8573248534eceea0fb1141
|
[
"MIT"
] | 2
|
2018-02-28T05:04:41.000Z
|
2020-12-17T12:18:55.000Z
|
coverd = """PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 RelativeLayout0 AccountDetailXListView2 LinearLayout1 LinearLayout2 LinearLayout2 LinearLayout0 TextView0
2016-09-19 23:53:07
PhoneWindow$DecorView0 FrameLayout0 FrameLayout0 RelativeLayout0 LinearLayout2 TextView0
2016-09-19 23:53:06
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 RelativeLayout0 AccountDetailXListView2 LinearLayout1 LinearLayout2 LinearLayout0 LinearLayout0 TextView0
2016-09-19 23:53:04
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 RelativeLayout3 TextView0
PhoneWindow$DecorView0 DragFrameLayout0 TopGestureLayout0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 RelativeLayout0 SwipListView0 LinearLayout2 LinearLayout0 RelativeLayout0 ImageView0
2016-09-19 23:53:02
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 HongBaoListView1 LinearLayout1 RelativeLayout0 RelativeLayout1 SingleLineTextView0
2016-09-19 23:53:00
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 HongBaoListView1 LinearLayout2 SimpleTextView1
2016-09-19 23:52:59
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 FrameLayout0
2016-09-19 23:52:57
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 QQTabWidget1 RelativeLayout0 ImageView1
2016-09-19 23:52:56
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 LinearLayout0 RelativeLayout0 TextView1
2016-09-19 23:52:55
2016-09-19 23:52:54
PhoneWindow$DecorView0 View1
2016-09-19 23:52:51
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 LinearLayout0 FrameLayout1 SystemMsgListView0 FrameLayout0 LinearLayout0 TextView1
2016-09-19 23:52:50
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 LinearLayout0 FPSPinnedHeaderExpandableListView1 LinearLayout1 LinearLayout1 RelativeLayout0 ImageView0
2016-09-19 23:52:49
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 QQTabWidget1 RedTouch1 RelativeLayout0 ImageView0
2016-09-19 23:52:48
PressBack
2016-09-19 23:52:46
2016-09-19 23:52:45
2016-09-19 23:52:43
2016-09-19 23:52:42
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout2 FrameLayout0 RelativeLayout0 RelativeLayout1 FrameLayout0
2016-09-19 23:52:41
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout2 FrameLayout0 RelativeLayout0 RelativeLayout1 HongBaoListView1 LinearLayout1 RelativeLayout0 RelativeLayout1 SingleLineTextView0
2016-09-19 23:52:39
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 RelativeLayout2 RedTouch0 ImageView0
2016-09-19 23:52:38
2016-09-19 23:53:03
2016-09-19 23:54:08
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 TopGestureLayout0 RelativeLayout0 XPanelContainer1 RelativeLayout0 AIOAnimationConatiner2
2016-09-19 23:54:06
2016-09-19 23:53:56
2016-09-19 23:53:53
2016-09-19 23:53:52
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout0 TextView1
2016-09-19 23:53:51
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 FrameLayout0 RefreshView2 TouchWebView0 X5WebViewAdapter0
2016-09-19 23:53:49
2016-09-19 23:53:48
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout0 TextView0
2016-09-19 23:53:47
2016-09-19 23:53:46
2016-09-19 23:53:44
2016-09-19 23:53:43
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 WebViewProgressBar1
2016-09-19 23:53:42
2016-09-19 23:55:07
2016-09-19 23:55:06
2016-09-19 23:55:05
2016-09-19 23:55:02
2016-09-19 23:54:52
2016-09-19 23:54:49
2016-09-19 23:54:48
2016-09-19 23:54:47
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 FrameLayout0 LinearLayout1 TextView0
2016-09-19 23:54:45
2016-09-19 23:54:44
2016-09-19 23:55:57
2016-09-19 23:55:55
2016-09-19 23:55:53
2016-09-19 23:55:52
2016-09-19 23:55:51
2016-09-19 23:55:50"""
all = """PhoneWindow$DecorView0 FrameLayout0 FrameLayout0 RelativeLayout0 LinearLayout2 TextView0
2016-09-19 23:53:07
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 RelativeLayout0 AccountDetailXListView2 LinearLayout1 LinearLayout2 LinearLayout2 LinearLayout0 TextView0
PhoneWindow$DecorView0 DragFrameLayout0 TopGestureLayout0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 RelativeLayout0 SwipListView0 LinearLayout2 LinearLayout0 RelativeLayout0 ImageView0
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 RelativeLayout0 AccountDetailXListView2 LinearLayout1 LinearLayout2 LinearLayout0 LinearLayout0 TextView0
2016-09-19 23:53:04
2016-09-19 23:53:06
2016-09-19 23:53:03
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 RelativeLayout3 TextView0
2016-09-19 23:53:02
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 HongBaoListView1 LinearLayout1 RelativeLayout0 RelativeLayout1 SingleLineTextView0
2016-09-19 23:52:57
2016-09-19 23:53:00
2016-09-19 23:52:59
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 FrameLayout0
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 RelativeLayout0 RelativeLayout1 HongBaoListView1 LinearLayout2 SimpleTextView1
2016-09-19 23:52:56
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 FrameLayout0 LinearLayout0 FPSPinnedHeaderExpandableListView1 LinearLayout1 LinearLayout1 RelativeLayout0 ImageView0
PhoneWindow$DecorView0 View1
2016-09-19 23:52:54
2016-09-19 23:52:51
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 QQTabWidget1 RelativeLayout0 ImageView1
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 LinearLayout0 RelativeLayout0 TextView1
PhoneWindow$DecorView0 TopGestureLayout0 LinearLayout0 FrameLayout0 LinearLayout0 FrameLayout1 SystemMsgListView0 FrameLayout0 LinearLayout0 TextView1
2016-09-19 23:52:55
2016-09-19 23:52:48
2016-09-19 23:52:49
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 QQTabWidget1 RedTouch1 RelativeLayout0 ImageView0
PressBack
2016-09-19 23:52:50
2016-09-19 23:52:46
2016-09-19 23:52:41
2016-09-19 23:52:42
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout2 FrameLayout0 RelativeLayout0 RelativeLayout1 FrameLayout0
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout0 RelativeLayout2 RedTouch0 ImageView0
2016-09-19 23:52:43
2016-09-19 23:52:45
2016-09-19 23:52:39
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 QQTabHost0 DrawerFrame0 DragFrameLayout2 FrameLayout0 RelativeLayout0 RelativeLayout1 HongBaoListView1 LinearLayout1 RelativeLayout0 RelativeLayout1 SingleLineTextView0
2016-09-19 23:54:08
2016-09-19 23:53:49
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 FrameLayout0 RefreshView2 TouchWebView0 X5WebViewAdapter0
PhoneWindow$DecorView0 LinearLayout0 FrameLayout0 NoSaveStateFrameLayout0 TopGestureLayout0 RelativeLayout0 XPanelContainer1 RelativeLayout0 AIOAnimationConatiner2
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout0 TextView0
2016-09-19 23:53:52
2016-09-19 23:53:53
2016-09-19 23:53:51
2016-09-19 23:54:06
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout0 TextView1
2016-09-19 23:53:56
2016-09-19 23:53:47
2016-09-19 23:53:48
2016-09-19 23:53:44
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 WebViewProgressBar1
2016-09-19 23:53:42
2016-09-19 23:53:43
2016-09-19 23:53:46
2016-09-19 23:55:07
2016-09-19 23:54:47
2016-09-19 23:54:44
2016-09-19 23:54:45
PhoneWindow$DecorView0 LinearLayout0 FrameLayout1 RelativeLayout0 RelativeLayout1 FrameLayout0 LinearLayout1 TextView0
2016-09-19 23:54:49
2016-09-19 23:54:48
2016-09-19 23:54:52
2016-09-19 23:55:02
2016-09-19 23:55:05
2016-09-19 23:55:06
2016-09-19 23:55:51
2016-09-19 23:55:50
2016-09-19 23:55:57
2016-09-19 23:55:55
2016-09-19 23:55:52
2016-09-19 23:55:53"""
# Compare the covered view paths against the full set and print any path
# that was recorded as covered but is absent from `all`.
# NOTE(review): `all` shadows the builtin; it is assigned above in this file
# so the name is kept for compatibility.
coverd = set(coverd.split('\n'))
all = set(all.split('\n'))
# BUGFIX: `print c` is Python-2-only syntax; `print(c)` works on both 2 and 3.
for c in sorted(coverd - all):
    print(c)
| 58.55414
| 242
| 0.873056
| 1,121
| 9,193
| 7.159679
| 0.073149
| 0.075505
| 0.100673
| 0.125841
| 0.990531
| 0.990531
| 0.957887
| 0.956392
| 0.919138
| 0.88612
| 0
| 0.218566
| 0.074296
| 9,193
| 157
| 243
| 58.55414
| 0.724559
| 0
| 0
| 0.909091
| 0
| 0
| 0.984555
| 0.182293
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.006494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1d85e34f290fb0efa91bc6e31229a4dbbe9764bc
| 4,503
|
py
|
Python
|
metabench/tests/test_objective.py
|
ComeBertrand/metabench
|
e5eaa32b94239b8fa475eda940b8086eec178cfe
|
[
"MIT"
] | null | null | null |
metabench/tests/test_objective.py
|
ComeBertrand/metabench
|
e5eaa32b94239b8fa475eda940b8086eec178cfe
|
[
"MIT"
] | 15
|
2018-03-07T21:47:56.000Z
|
2018-05-12T08:45:20.000Z
|
metabench/tests/test_objective.py
|
ComeBertrand/metabench
|
e5eaa32b94239b8fa475eda940b8086eec178cfe
|
[
"MIT"
] | null | null | null |
import pytest
from .fixtures import *
from ..common.fitness import *
def test_modifs_add_modif(modifs):
    """Every added modification is retrievable by its index."""
    registry = Modifs()
    for idx, before, after in modifs:
        registry.add_modif(idx, before, after)
    # Each entry's (val_bef, val_aft) tail must round-trip through get().
    for entry in modifs:
        assert registry.get(entry[0], None) == entry[1:]
def test_modifs_add_double_modif(modifs_double):
    """Adding the same index twice collapses to the expected entry."""
    entries, expected = modifs_double
    registry = Modifs()
    for idx, before, after in entries:
        registry.add_modif(idx, before, after)
    assert registry.get(expected[0], None) == expected[1:]
def test_modifs_setitem(modifs):
    """Direct item assignment on Modifs is forbidden."""
    registry = Modifs()
    with pytest.raises(NotImplementedError):
        registry[0] = ('a', 'b')
def test_objective_no_partial(fitness_func,
                              binary_solution,
                              modifs_as_modifs,
                              modifs_empty):
    """Without a partial fitness function, the full fitness is always used."""
    objective = Objective(fitness_func)

    def check(modifs):
        # Reset so the objective must compute from scratch.
        binary_solution.fitness = None
        assert objective._compute_fitness_value(binary_solution, modifs) == VALUE_RETURNED_FIT
        if modifs is None:
            objective(binary_solution)
        else:
            objective(binary_solution, modifs)
        assert binary_solution.fitness == VALUE_RETURNED_FIT

    check(None)
    check(modifs_as_modifs)
    check(modifs_empty)
def test_objective_no_partial_fitness(fitness_func,
                                      binary_solution,
                                      modifs_as_modifs,
                                      modifs_empty):
    """With a pre-set fitness and no partial function: calling with no modifs
    leaves the cached fitness in place; calling with modifs recomputes it."""
    objective = Objective(fitness_func)

    def check(modifs, expected_after):
        binary_solution.fitness = VALUE_NOT_RETURNED
        assert objective._compute_fitness_value(binary_solution, modifs) == VALUE_RETURNED_FIT
        if modifs is None:
            objective(binary_solution)
        else:
            objective(binary_solution, modifs)
        assert binary_solution.fitness == expected_after

    check(None, VALUE_NOT_RETURNED)
    check(modifs_as_modifs, VALUE_RETURNED_FIT)
    check(modifs_empty, VALUE_RETURNED_FIT)
def test_objective_partial(fitness_func,
                           fitness_partial_func,
                           binary_solution,
                           modifs_as_modifs,
                           modifs_empty):
    """With a partial function but no cached fitness, the full fitness is
    computed in every case (partial needs a previous value to patch)."""
    objective = Objective(fitness_func, fitness_partial_func)

    def check(modifs):
        binary_solution.fitness = None
        assert objective._compute_fitness_value(binary_solution, modifs) == VALUE_RETURNED_FIT
        if modifs is None:
            objective(binary_solution)
        else:
            objective(binary_solution, modifs)
        assert binary_solution.fitness == VALUE_RETURNED_FIT

    check(None)
    check(modifs_as_modifs)
    check(modifs_empty)
def test_objective_partial_fitness(fitness_func,
                                   fitness_partial_func,
                                   binary_solution,
                                   modifs_as_modifs,
                                   modifs_empty):
    """With a cached fitness and a partial function: no modifs leaves the
    cache untouched, non-empty modifs trigger the partial computation, and
    empty modifs fall back to the full fitness."""
    o = Objective(fitness_func, fitness_partial_func)
    binary_solution.fitness = VALUE_NOT_RETURNED
    assert o._compute_fitness_value(binary_solution, None) == VALUE_RETURNED_FIT
    o(binary_solution)
    # Cached fitness is kept when no modifs are supplied.
    assert binary_solution.fitness == VALUE_NOT_RETURNED
    binary_solution.fitness = VALUE_NOT_RETURNED
    assert (o._compute_fitness_value(binary_solution, modifs_as_modifs) ==
            VALUE_RETURNED_FIT_PART)
    # BUGFIX: this line referenced an undefined name `modifs` (NameError);
    # the fixture used throughout this test is `modifs_as_modifs` (compare
    # the parallel call in test_objective_no_partial_fitness).
    o(binary_solution, modifs_as_modifs)
    assert binary_solution.fitness == VALUE_RETURNED_FIT_PART
    binary_solution.fitness = VALUE_NOT_RETURNED
    assert (o._compute_fitness_value(binary_solution, modifs_empty) ==
            VALUE_RETURNED_FIT)
    o(binary_solution, modifs_empty)
    assert binary_solution.fitness == VALUE_RETURNED_FIT
| 35.456693
| 80
| 0.6831
| 530
| 4,503
| 5.364151
| 0.092453
| 0.256068
| 0.177278
| 0.164615
| 0.886739
| 0.886739
| 0.872318
| 0.872318
| 0.855083
| 0.855083
| 0
| 0.001478
| 0.248723
| 4,503
| 126
| 81
| 35.738095
| 0.8389
| 0
| 0
| 0.804124
| 0
| 0
| 0.000444
| 0
| 0
| 0
| 0
| 0
| 0.268041
| 1
| 0.072165
| false
| 0
| 0.030928
| 0
| 0.103093
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1d9f8e98ddffd3f1f5c08b4a4dd8899f5b300838
| 10,589
|
py
|
Python
|
Server_Python/Movefunctions.py
|
mehrdadzakershahrak/Online-Explanation-Generation
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
[
"MIT"
] | null | null | null |
Server_Python/Movefunctions.py
|
mehrdadzakershahrak/Online-Explanation-Generation
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
[
"MIT"
] | null | null | null |
Server_Python/Movefunctions.py
|
mehrdadzakershahrak/Online-Explanation-Generation
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
[
"MIT"
] | null | null | null |
import math
import almath
import almath as m # python's wrapping of almath
import sys
from naoqi import ALProxy
import naoqi
import time
import motion
##################################################################################################################
# function moveforward
##################################################################################################################
# -*- encoding: UTF-8 -*-
'''Move To: Small example to make Nao Move To an Objective'''
def StiffnessOn(proxy):
    """Enable stiffness on every joint of the robot.

    The "Body" group name addresses the collection of all joints; stiffness
    and interpolation time are both set to 1.0.
    """
    proxy.stiffnessInterpolation("Body", 1.0, 1.0)
def main(robotIP):
# Init proxies.
try:
motionProxy = ALProxy("ALMotion", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ", e
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Set NAO in Stiffness On
StiffnessOn(motionProxy)
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
#####################
## Enable arms control by Walk algorithm
#####################
motionProxy.setWalkArmsEnabled(True, True)
#~ motionProxy.setWalkArmsEnabled(False, False)
#####################
## FOOT CONTACT PROTECTION
#####################
#~ motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", False]])
motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])
#TARGET VELOCITY
X = 0.5
Y = 0.0
Theta = 0.0
motionProxy.post.moveTo(X, Y, Theta)
motionProxy.waitUntilMoveIsFinished()
#####################
## Arms User Motion
#####################
# Arms motion from user have always the priority than walk arms motion
JointNames = ["LShoulderPitch", "LShoulderRoll", "LElbowYaw", "LElbowRoll"]
Arm1 = [-40, 25, 0, -40]
Arm1 = [ x * motion.TO_RAD for x in Arm1]
Arm2 = [-40, 50, 0, -80]
Arm2 = [ x * motion.TO_RAD for x in Arm2]
pFractionMaxSpeed = 0.6
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm2, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
time.sleep(2.0)
#####################
## End Walk
#####################
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = 0.0
motionProxy.setWalkTargetVelocity(X, Y, Theta, Frequency)
motionProxy.stopMove()
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python motion_moveTo.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp)
##################################################################################################################
# function turnleft
##################################################################################################################
def StiffnessOn(proxy):
    """Switch the whole body to full stiffness over one second.

    proxy -- ALMotion proxy used to drive the stiffness interpolation.
    """
    # "Body" is NAOqi's alias for the collection of all joints.
    chain = "Body"
    stiffness = 1.0
    duration = 1.0
    proxy.stiffnessInterpolation(chain, stiffness, duration)
def main(robotIP):
# Init proxies.
try:
motionProxy = ALProxy("ALMotion", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ", e
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Set NAO in Stiffness On
StiffnessOn(motionProxy)
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
#####################
## Enable arms control by Walk algorithm
#####################
motionProxy.setWalkArmsEnabled(True, True)
#~ motionProxy.setWalkArmsEnabled(False, False)
#####################
## FOOT CONTACT PROTECTION
#####################
#~ motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", False]])
motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = math.pi/2-.15
motionProxy.post.moveTo(X, Y, Theta)
motionProxy.waitUntilMoveIsFinished()
#####################
## Arms User Motion
#####################
# Arms motion from user have always the priority than walk arms motion
JointNames = ["LShoulderPitch", "LShoulderRoll", "LElbowYaw", "LElbowRoll"]
Arm1 = [-40, 25, 0, -40]
Arm1 = [ x * motion.TO_RAD for x in Arm1]
Arm2 = [-40, 50, 0, -80]
Arm2 = [ x * motion.TO_RAD for x in Arm2]
pFractionMaxSpeed = 0.6
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm2, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
time.sleep(2.0)
#####################
## End Walk
#####################
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = 0.0
motionProxy.setWalkTargetVelocity(X, Y, Theta, Frequency)
motionProxy.stopMove()
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python motion_moveTo.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp)
##################################################################################################################
# function turnright
##################################################################################################################
def StiffnessOn(proxy):
    """Bring all joints to maximum stiffness via the supplied ALMotion proxy."""
    # Full body, full stiffness, reached within one second.
    proxy.stiffnessInterpolation("Body", 1.0, 1.0)
def main(robotIP):
# Init proxies.
try:
motionProxy = ALProxy("ALMotion", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ", e
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Set NAO in Stiffness On
StiffnessOn(motionProxy)
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
#####################
## Enable arms control by Walk algorithm
#####################
motionProxy.setWalkArmsEnabled(True, True)
#~ motionProxy.setWalkArmsEnabled(False, False)
#####################
## FOOT CONTACT PROTECTION
#####################
#~ motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", False]])
motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = -math.pi/2+.15
motionProxy.post.moveTo(X, Y, Theta)
motionProxy.waitUntilMoveIsFinished()
#####################
## Arms User Motion
#####################
# Arms motion from user have always the priority than walk arms motion
JointNames = ["LShoulderPitch", "LShoulderRoll", "LElbowYaw", "LElbowRoll"]
Arm1 = [-40, 25, 0, -40]
Arm1 = [ x * motion.TO_RAD for x in Arm1]
Arm2 = [-40, 50, 0, -80]
Arm2 = [ x * motion.TO_RAD for x in Arm2]
pFractionMaxSpeed = 0.6
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm2, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
time.sleep(2.0)
#####################
## End Walk
#####################
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = 0.0
motionProxy.setWalkTargetVelocity(X, Y, Theta, Frequency)
motionProxy.stopMove()
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python motion_moveTo.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp)
##################################################################################################################
# function turnaround
##################################################################################################################
def StiffnessOn(proxy):
    """Interpolate every joint up to full stiffness (one-second ramp).

    proxy -- ALMotion proxy carrying out the interpolation.
    """
    # NAOqi's "Body" chain name targets all joints in one call.
    body_chain = "Body"
    full_stiffness = 1.0
    ramp_seconds = 1.0
    proxy.stiffnessInterpolation(body_chain, full_stiffness, ramp_seconds)
def main(robotIP):
# Init proxies.
try:
motionProxy = ALProxy("ALMotion", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ", e
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Set NAO in Stiffness On
StiffnessOn(motionProxy)
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
#####################
## Enable arms control by Walk algorithm
#####################
motionProxy.setWalkArmsEnabled(True, True)
#~ motionProxy.setWalkArmsEnabled(False, False)
#####################
## FOOT CONTACT PROTECTION
#####################
#~ motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", False]])
motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = -math.pi+.15
motionProxy.post.moveTo(X, Y, Theta)
motionProxy.waitUntilMoveIsFinished()
#####################
## Arms User Motion
#####################
# Arms motion from user have always the priority than walk arms motion
JointNames = ["LShoulderPitch", "LShoulderRoll", "LElbowYaw", "LElbowRoll"]
Arm1 = [-40, 25, 0, -40]
Arm1 = [ x * motion.TO_RAD for x in Arm1]
Arm2 = [-40, 50, 0, -80]
Arm2 = [ x * motion.TO_RAD for x in Arm2]
pFractionMaxSpeed = 0.6
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm2, pFractionMaxSpeed)
motionProxy.angleInterpolationWithSpeed(JointNames, Arm1, pFractionMaxSpeed)
time.sleep(2.0)
#####################
## End Walk
#####################
#TARGET VELOCITY
X = 0.0
Y = 0.0
Theta = 0.0
motionProxy.setWalkTargetVelocity(X, Y, Theta, Frequency)
motionProxy.stopMove()
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python motion_moveTo.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp)
| 27.361757
| 114
| 0.598168
| 1,108
| 10,589
| 5.655235
| 0.122744
| 0.008937
| 0.040217
| 0.033195
| 0.963773
| 0.963773
| 0.963773
| 0.962177
| 0.962177
| 0.962177
| 0
| 0.030525
| 0.161583
| 10,589
| 387
| 115
| 27.361757
| 0.675265
| 0
| 0
| 0.92
| 0
| 0
| 0.163897
| 0.016897
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04
| null | null | 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d56c3b3a9e798efc35a9919280b73426f17ac158
| 47
|
py
|
Python
|
app/db/schemas/user/__init__.py
|
ardihikaru/login-boilerplate
|
e10d077de0aefa9cb7a4633f915304e805c3f982
|
[
"MIT"
] | null | null | null |
app/db/schemas/user/__init__.py
|
ardihikaru/login-boilerplate
|
e10d077de0aefa9cb7a4633f915304e805c3f982
|
[
"MIT"
] | null | null | null |
app/db/schemas/user/__init__.py
|
ardihikaru/login-boilerplate
|
e10d077de0aefa9cb7a4633f915304e805c3f982
|
[
"MIT"
] | null | null | null |
from .user import *
from .user_signup import *
| 15.666667
| 26
| 0.744681
| 7
| 47
| 4.857143
| 0.571429
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 27
| 23.5
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d5746822006e3ee19344646ea20ae1cfb85f9a43
| 19,432
|
py
|
Python
|
src/librekpi/rest_controller.py
|
LibreKPI/librekpi
|
07bfbf18ff9f99a4b3347060b25699cb09f6f6b6
|
[
"MIT"
] | null | null | null |
src/librekpi/rest_controller.py
|
LibreKPI/librekpi
|
07bfbf18ff9f99a4b3347060b25699cb09f6f6b6
|
[
"MIT"
] | 10
|
2015-01-12T20:49:21.000Z
|
2015-03-12T17:20:18.000Z
|
src/librekpi/rest_controller.py
|
LibreKPI/librekpi
|
07bfbf18ff9f99a4b3347060b25699cb09f6f6b6
|
[
"MIT"
] | 1
|
2015-01-11T23:54:09.000Z
|
2015-01-11T23:54:09.000Z
|
"""REST Controller
Here you'll find handlers for RESTful API, which is meant to be stateless
"""
import tornado
from tornado.escape import json_decode
from librekpi.utils import routes
from librekpi.view import BaseRESTController
from librekpi.api import *
import requests
@routes('/api/kpi_schedule/', name="kpi_api")
class KPIApiHandler(BaseRESTController):
    """Fetch a group's lesson schedule by proxying api.rozklad.org.ua."""
    @tornado.web.asynchronous
    def post(self):
        # Request body is JSON; 'group' is the group name to look up.
        group = json_decode(self.request.body).get('group')
        # NOTE(review): requests.get is a blocking call inside a tornado
        # handler -- it stalls the IO loop; consider AsyncHTTPClient.
        self.write(requests
                   .get('http://api.rozklad.org.ua/v1/groups/{}/lessons'
                        .format(group)).text)
        self.finish()
@routes("/api/", name="api")
class ApiHandler(BaseRESTController):
    """API root endpoint; placeholder with no methods implemented yet."""
    pass
@routes('/api/user/', name="user_api")
class UserApiHandler(BaseRESTController):
    """User registration endpoint (user creation currently stubbed out)."""
    @tornado.web.asynchronous
    #@tornado.gen.coroutine
    def post(self):
        # Parsed body is currently unused: the real create_user call below
        # is commented out and a fixed success response is returned instead.
        kwargs = json_decode(self.request.body)
        self._return({'result': 'success'})
        #create_user(self._return, **kwargs)
@routes('/api/auth/', name="auth_api")
class AuthApiHandler(BaseRESTController):
    """Session endpoints: POST logs in, DELETE logs out (both stubbed)."""
    @tornado.web.asynchronous
    def post(self):
        """Login"""
        # Credentials are parsed but unused: authentication is stubbed out.
        kwargs = json_decode(self.request.body)
        #authenticate_user(self._return, **kwargs)
        self._return({'result': 'success'}) # or error
    @tornado.web.asynchronous
    def delete(self):
        """Logout"""
        #logout_user(**kwargs)
        self._return({'result': 'success'}) # or error
@routes('/api/university/', name="univ_api")
class UniversityApiHandler(BaseRESTController):
    """University endpoints: POST creates (stub), GET autocompletes (stub)."""
    @tornado.web.asynchronous
    def post(self):
        """Create"""
        #create_university(**kwargs)
        self._return({'id': 1}) # or some other int, otherwise - error
    @tornado.web.asynchronous
    def get(self):
        """Autocomplete"""
        #get_universities(**kwargs)
        # Hard-coded sample payload until get_universities is wired up.
        self._return([{'id': 1, 'name': 'National University of Ukraine \'Kyiv Polytechnic Institute\''}, {'id': 3, 'name': 'National University of Georgia'}]) # or error
@routes('/api/group/', name="group_api")
class GroupApiHandler(BaseRESTController):
    """Group endpoints: POST creates (stub), GET autocompletes (stub)."""
    @tornado.web.asynchronous
    def post(self):
        """Create"""
        #create_group(**kwargs)
        self._return({'id': 1}) # or some other int, otherwise - error
    @tornado.web.asynchronous
    def get(self):
        """Autocomplete"""
        #get_groups(**kwargs)
        # Hard-coded sample payload until get_groups is wired up.
        self._return([{'id': 1, 'name': 'IO-31m'}, {'id': 3, 'name': 'IK-32s'}]) # or error
@routes('/api/class/', name="class_api")
class ClassApiHandler(BaseRESTController):
    """Class endpoint: POST creates a class (stub)."""
    @tornado.web.asynchronous
    def post(self):
        """Create"""
        #create_class(**kwargs)
        self._return({'id': 1}) # or some other int, otherwise - error
@routes('/api/timetable/', name="sched_api")
class TimetableApiHandler(BaseRESTController):
    """Timetable endpoint returning a stub week schedule."""
    @tornado.web.asynchronous
    def get(self):
        """Return a stub timetable: 8 days of 4 identical lessons each.

        The original handler inlined the same lesson dict 32 times; it is
        built once here and copied, producing an identical payload.
        """
        #create_timetable(**kwargs)
        lesson = {
            'lesson_name': 'Computer science',
            'audience': '418-18',
            'teacher_name': 'Oleh Lisovychenko',
            'type': 'lection', # (lection\practice\laboratory)
            'start_time': '08:30',
            'color': '#ff0000', # (hex)
        }
        # dict(lesson) gives each slot its own copy, matching the original
        # literal where every entry was an independent dict object.
        self._return(
            [[dict(lesson) for _ in range(4)]
             for _ in range(8)]) # or some other int, otherwise - error
@routes('/api/comment/', name="comment_api")
class CommentApiHandler(BaseRESTController):
    """Lesson comments endpoint (both verbs stubbed)."""
    @staticmethod
    def _stub_comment():
        """One stub comment record; a fresh dict per call."""
        return {
            'user_name': 'webknjaz',
            #'user_picture_url': '',
            'text': 'ololo',
            'time': '08:00',
        }
    @tornado.web.asynchronous
    def post(self):
        """Post"""
        #create_comment(**kwargs)
        self._return(self._stub_comment()) # or some other int, otherwise - error
    @tornado.web.asynchronous
    def get(self):
        """Get lesson messages"""
        #get_comment_list(**kwargs)
        # The original inlined the same comment dict four times.
        self._return([self._stub_comment() for _ in range(4)]) # or error
| 47.279805
| 171
| 0.342734
| 1,179
| 19,432
| 5.522477
| 0.111111
| 0.034096
| 0.088466
| 0.122869
| 0.826908
| 0.805099
| 0.778529
| 0.751805
| 0.74474
| 0.715712
| 0
| 0.049866
| 0.53767
| 19,432
| 410
| 172
| 47.395122
| 0.674866
| 0.104055
| 0
| 0.691667
| 0
| 0
| 0.232259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0.002778
| 0.016667
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5a10b2e941541130e9ca314d706ec19bb4bf20b
| 2,561
|
py
|
Python
|
tests/path/vshadow_path_spec.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 176
|
2015-01-02T13:55:39.000Z
|
2022-03-12T11:44:37.000Z
|
tests/path/vshadow_path_spec.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 495
|
2015-01-13T06:47:06.000Z
|
2022-03-12T11:07:03.000Z
|
tests/path/vshadow_path_spec.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 62
|
2015-02-23T08:19:38.000Z
|
2022-03-18T06:01:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the VSS path specification implementation."""
import unittest
from dfvfs.path import vshadow_path_spec
from tests.path import test_lib
class VShadowPathSpecTest(test_lib.PathSpecTestCase):
  """Tests for the VSS path specification implementation."""

  def testInitialize(self):
    """Tests the path specification initialization."""
    # Parent-only construction must succeed.
    path_spec = vshadow_path_spec.VShadowPathSpec(parent=self._path_spec)
    self.assertIsNotNone(path_spec)

    # Every valid combination of the optional keyword arguments.
    for extra_kwargs in (
        {'location': '/vss2'},
        {'store_index': 1},
        {'location': '/vss2', 'store_index': 1}):
      path_spec = vshadow_path_spec.VShadowPathSpec(
          parent=self._path_spec, **extra_kwargs)
      self.assertIsNotNone(path_spec)

    # A parent path specification is mandatory.
    with self.assertRaises(ValueError):
      vshadow_path_spec.VShadowPathSpec(parent=None)

    # Unknown keyword arguments are rejected.
    with self.assertRaises(ValueError):
      vshadow_path_spec.VShadowPathSpec(
          parent=self._path_spec, bogus='BOGUS')

  def testComparable(self):
    """Tests the path specification comparable property."""
    # (constructor kwargs, expected VSHADOW line of the comparable) pairs.
    cases = (
        ({}, 'type: VSHADOW'),
        ({'location': '/vss2'}, 'type: VSHADOW, location: /vss2'),
        ({'store_index': 1}, 'type: VSHADOW, store index: 1'),
        ({'location': '/vss2', 'store_index': 1},
         'type: VSHADOW, location: /vss2, store index: 1'))

    for extra_kwargs, expected_line in cases:
      path_spec = vshadow_path_spec.VShadowPathSpec(
          parent=self._path_spec, **extra_kwargs)
      self.assertIsNotNone(path_spec)

      # Equivalent to '\n'.join(['type: TEST', expected_line, '']).
      self.assertEqual(
          path_spec.comparable, 'type: TEST\n{0:s}\n'.format(expected_line))

if __name__ == '__main__':
  unittest.main()
| 26.957895
| 73
| 0.701289
| 289
| 2,561
| 5.930796
| 0.183391
| 0.186698
| 0.096266
| 0.175029
| 0.855309
| 0.82147
| 0.82147
| 0.768961
| 0.76196
| 0.684947
| 0
| 0.006235
| 0.185865
| 2,561
| 94
| 74
| 27.244681
| 0.815827
| 0.094885
| 0
| 0.727273
| 0
| 0
| 0.086672
| 0
| 0
| 0
| 0
| 0
| 0.254545
| 1
| 0.036364
| false
| 0
| 0.054545
| 0
| 0.109091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8927547eb72aaeb750e9f602231d8a40bc695613
| 206
|
py
|
Python
|
art/estimators/certification/neural_cleanse/__init__.py
|
meghana-sesetti/adversarial-robustness-toolbox
|
6a5ce9e4142734ad9004e5c093ef8fa754ea6b39
|
[
"MIT"
] | 1
|
2020-12-26T10:02:05.000Z
|
2020-12-26T10:02:05.000Z
|
art/estimators/certification/neural_cleanse/__init__.py
|
Tikquuss/adversarial-robustness-toolbox
|
62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7
|
[
"MIT"
] | 33
|
2021-01-18T08:30:34.000Z
|
2022-03-11T07:05:13.000Z
|
art/estimators/certification/neural_cleanse/__init__.py
|
Tikquuss/adversarial-robustness-toolbox
|
62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7
|
[
"MIT"
] | 1
|
2020-09-28T12:58:01.000Z
|
2020-09-28T12:58:01.000Z
|
"""
Neural cleanse estimators.
"""
from art.estimators.certification.neural_cleanse.neural_cleanse import NeuralCleanseMixin
from art.estimators.certification.neural_cleanse.keras import KerasNeuralCleanse
| 34.333333
| 89
| 0.864078
| 22
| 206
| 7.954545
| 0.454545
| 0.297143
| 0.194286
| 0.342857
| 0.491429
| 0.491429
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063107
| 206
| 5
| 90
| 41.2
| 0.906736
| 0.126214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
893bbc5c5c999b175cf4af93798bf788812f9507
| 23,722
|
py
|
Python
|
tests/dhcpv4/kea_only/test_cve2019v4.py
|
isc-projects/forge
|
dfec8b41003d6b5a229f69ee93616e0e5cc6d71b
|
[
"0BSD"
] | 22
|
2015-02-27T11:51:05.000Z
|
2022-02-28T12:39:29.000Z
|
tests/dhcpv4/kea_only/test_cve2019v4.py
|
isc-projects/forge
|
dfec8b41003d6b5a229f69ee93616e0e5cc6d71b
|
[
"0BSD"
] | 16
|
2018-10-30T15:00:12.000Z
|
2019-01-11T17:55:13.000Z
|
tests/dhcpv4/kea_only/test_cve2019v4.py
|
isc-projects/forge
|
dfec8b41003d6b5a229f69ee93616e0e5cc6d71b
|
[
"0BSD"
] | 11
|
2015-02-27T11:51:36.000Z
|
2021-03-30T08:33:54.000Z
|
"""CVE-2019-6472 and -6473"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import srv_control
import misc
from forge_cfg import world
def _get_offer():
    """Exchange DISCOVER/OFFER with the server to prove it is still alive.

    Used after sending a malformed (CVE reproduction) packet: if the server
    survived, it must still answer a well-formed DISCOVER with an OFFER.
    """
    misc.test_procedure()
    srv_msg.client_does_include_with_value('client_id', '00010203040111')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
@pytest.mark.v4
def test_cve_2019_6472():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
# correct message
killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
# too long client-id, kea have to drop it and survive, exactly 255
killer_message += b"\x3d\xfe\x00" + 253 * b"\x12"
killer_message += b"\xff"
srv_msg.send_raw_message(raw_append=killer_message)
srv_msg.send_wait_for_message('MUST', None, expect_response=False)
# let's check if it's still alive
_get_offer()
@pytest.mark.v4
def test_cve_2019_6473():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
# message straight from fuzzer, kea has to drop it and survive
killer_message = b"\x01\x2c\x06\x00\x00\x00\x3d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\xe7\x03\x00\x00\x00\x00\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfa\xff\xff\xff\x00\x00\x00\x00\xe0\xff\x00\x00\x00\x00\x00\x00\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\xff\xff\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\xff\xee\x63\x82\x53\x63\x35\x01\x01\x3d\x07\x01\x00\x00\x00\x00\x00\x00\x19\x0c\x4e\x01\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\xff\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\xff\xff\xff\x7f\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x04\x63\x82\x53\x63\x35\x01\x01\x3d\x07\x01\x00\x00\x00\x00\x00\x00\x19\x0c\x4e\x01\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x19\x0c\x4e\x01\x05\x3a\x04\xde\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x40\x00\x00\x00\x00\
x00\x0a\x00\x12\x00\x00\x00\x00\x00\x19\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\xff\xff\x05\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x00\x00\x00\x00\x00\x00\x0a\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfe"
srv_msg.send_raw_message(raw_append=killer_message)
srv_msg.send_wait_for_message('MUST', None, expect_response=False)
# check if kea is still alive
_get_offer()
@pytest.mark.v4
def test_cve_2019_6473_hostname():
    """CVE-2019-6473: DISCOVER with a garbage Hostname option (code 0x0c).

    Kea must drop the malformed packet without crashing; a follow-up
    discover (_get_offer) proves the server is still answering.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # correct message: a well-formed DHCPDISCOVER up to option 53 (type 1)
    killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
    # complete rubbish in hostname, should cause kea to drop message
    killer_message += b"\x0c\xff\xff\xff\x7f\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x04\x63\x82\x53\x63\x35\x01\x01\x3d\x07\x01\x00\x00\x00\x00\x00\x00\x19\x0c\x4e\x01\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x19\x0c\x4e\x01\x05\x3a\x04\xde\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x40\x00\x00\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x19\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\xff\xff\x05\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b"
    killer_message += b"\xff"  # end option
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_6473_hostname_length_0():
    """CVE-2019-6473: Hostname option (0x0c) with declared length 0.

    Kea should drop the packet and keep serving; _get_offer verifies it.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # correct message: a well-formed DHCPDISCOVER up to option 53 (type 1)
    killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
    # hostname option 0x0c with length byte 0x00, followed by a stray 0x00 pad;
    # kea should drop the packet and survive
    killer_message += b"\x0c\x00\x00"
    killer_message += b"\xff"  # end option
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_6473_hostname_over_255():
    """CVE-2019-6473: garbage Hostname option padded past any sane length.

    In DHCPv4 a single option payload cannot exceed 255 bytes; Kea must
    reject the packet and stay up (_get_offer checks liveness).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # correct message: a well-formed DHCPDISCOVER up to option 53 (type 1)
    killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
    # garbage hostname option (0x0c) payload, same as the basic hostname test
    killer_message += b"\x0c\xff\xff\xff\x7f\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x04\x63\x82\x53\x63\x35\x01\x01\x3d\x07\x01\x00\x00\x00\x00\x00\x00\x19\x0c\x4e\x01\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x19\x0c\x4e\x01\x05\x3a\x04\xde\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x40\x00\x00\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x19\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\xff\xff\x05\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b"
    killer_message += 50 * b"\x00"  # this is not gonna fly, in v4 you can't put too long option, max is 255
    killer_message += b"\xff"  # end option
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_6473_fqdn():
    """CVE-2019-6473: DISCOVER with a garbage option 0x0f payload.

    NOTE(review): 0x0f is DHCP option 15 (Domain Name); the Client-FQDN
    option is 81 — the test name may be loose; confirm intended option.
    Kea should drop the packet and keep answering (_get_offer).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # correct message: a well-formed DHCPDISCOVER up to option 53 (type 1)
    killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
    # incorrect FQDN, kea should drop and survive
    killer_message += b"\x0f\xff\xff\xff\x7f\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x04\x63\x82\x53\x63\x35\x01\x01\x3d\x07\x01\x00\x00\x00\x00\x00\x00\x19\x0c\x4e\x01\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x19\x0c\x4e\x01\x05\x3a\x04\xde\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x40\x00\x00\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x19\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\xff\xff\x05\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b"
    killer_message += b"\xff"  # end option
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_6473_fqdn_too_long():
    """CVE-2019-6473: garbage option 0x0f payload padded with trailing zeros.

    Exercises an over-long option body (v4 option length max is 255);
    Kea must drop the packet and keep serving (_get_offer).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # correct message: a well-formed DHCPDISCOVER up to option 53 (type 1)
    killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
    # incorrect FQDN extended with zeros at the end
    killer_message += b"\x0f\xff\xff\xff\x7f\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x04\x63\x82\x53\x63\x35\x01\x01\x3d\x07\x01\x00\x00\x00\x00\x00\x00\x19\x0c\x4e\x01\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x04\x00\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x00\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x19\x0c\x4e\x01\x05\x3a\x04\xde\x00\x07\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\x3a\x07\x08\x3b\x04\x00\x00\x2e\x3b\x04\x00\x19\x2e\x56\x40\x00\x00\x00\x00\x00\x0a\x00\x12\x00\x00\x00\x00\x00\x19\x00\x0b\x82\x01\xfc\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x35\x01\x05\xff\xff\x05\x00\x07\x08\x3b\x04\x00\x00\x2e\x3b"
    killer_message += 40 * b"\x00"  # in dhcp v4 option length max is 255, let's put 00 at the end
    killer_message += b"\xff"  # end
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_6473_fqdn_0_length():
    """CVE-2019-6473: option 0x0f with declared length 0.

    NOTE(review): the original comment said "hostname", but the option
    byte sent is 0x0f, not 0x0c. Kea should drop the packet and survive.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # correct message: a well-formed DHCPDISCOVER up to option 53 (type 1)
    killer_message = b"\x01\x01\x06\x00\x00\x80\x64\x49\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x27\x6d\xee\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x01"
    # option 0x0f with length byte 0x00 plus a stray 0x00 pad — should be dropped
    killer_message += b"\x0f\x00\x00"
    killer_message += b"\xff"  # end option
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_wtf():
    """Regression test with a heavily malformed raw packet.

    The payload declares message type 3 (\\x35\\x01\\x03, DHCPREQUEST) with
    corrupted header fields and option soup; Kea must not crash on it.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('10.0.0.0/8', '10.0.0.0-10.255.255.255')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    killer_message = b"\x01\x00\x00\x02\x00\x2e\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x6c\x82\xdc\x4e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x03\x5c\xff\x02\xf9\x37\x04\x01\x1c\x03\x2b\x33\x04\x00\x00\x0e\x07\x50\x61\x64\x64\x69\x6e\x67\x00\x3d\x07\x01\x00\x00\x6c\x82\xdc\x4e\xff"
    srv_msg.send_raw_message(raw_append=killer_message)
    srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # check if kea is still alive
    _get_offer()
@pytest.mark.v4
def test_cve_2019_6474():
    """CVE-2019-6474: broken lease written to disk must not brick restart."""
    # This test verifies two issues uncovered in CVE-2019-6474:
    # - a broken packet can cause Kea to write invalid lease to disk
    # - when restarted, memfile backend gives up if there were more than 100
    #   errors while reading a lease file.
    misc.test_setup()
    srv_control.config_srv_subnet('10.0.0.0/8', '10.0.0.0-10.255.255.255')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # we will send a lot of exactly the same packets, let's turn off printing them
    tmp = world.f_cfg.show_packets_from
    world.f_cfg.show_packets_from = ""
    world.scapy_verbose = 0
    misc.test_procedure()
    # message that causes kea to write incorrect lease
    killer_message = b"\x01\x00\x00\x02\x00\x2e\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x6c\x82\xdc\x4e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x03\x5c\xff\x02\xf9\x37\x04\x01\x1c\x03\x2b\x33\x04\x00\x00\x0e\x07\x50\x61\x64\x64\x69\x6e\x67\x00\x3d\x07\x01\x00\x00\x6c\x82\xdc\x4e\xff"
    # send it 101 times. This is an attempt to trigger the memfile lease parser to
    # bail out after 100 broken leases being read from a file.
    for _ in range(101):
        srv_msg.send_raw_message(raw_append=killer_message)
        # kea is actually responding but scapy is unable to detect it
        srv_msg.send_wait_for_message('MUST', None, expect_response=False)
    # NOTE(review): scapy_verbose is restored to a hard-coded 99 rather than
    # its value before the test — confirm 99 is the framework default.
    world.scapy_verbose = 99
    world.f_cfg.show_packets_from = tmp
    # restart kea, before fix it wasn't starting
    srv_control.start_srv('DHCP', 'stopped')
    srv_control.start_srv('DHCP', 'started')
    # check if kea is still alive
    _get_offer()
| 100.092827
| 2,672
| 0.739229
| 5,254
| 23,722
| 3.271983
| 0.047012
| 1.038683
| 1.46693
| 1.872142
| 0.928858
| 0.924146
| 0.918271
| 0.910942
| 0.906055
| 0.899599
| 0
| 0.365985
| 0.053663
| 23,722
| 236
| 2,673
| 100.516949
| 0.399795
| 0.066816
| 0
| 0.756944
| 0
| 0.097222
| 0.771229
| 0.751856
| 0
| 1
| 0
| 0
| 0
| 1
| 0.076389
| false
| 0.006944
| 0.034722
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 17
|
899e4c36d75d5d0e710e102efea98f1bca95a944
| 206
|
py
|
Python
|
src/__init__.py
|
lukaskln/GRU-Protein-Analysis
|
06233285b3267716248129a64cbde641f80c5b54
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
lukaskln/GRU-Protein-Analysis
|
06233285b3267716248129a64cbde641f80c5b54
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
lukaskln/GRU-Protein-Analysis
|
06233285b3267716248129a64cbde641f80c5b54
|
[
"MIT"
] | null | null | null |
from utils.argparser import *
from data.dataimport import *
from models.model_GRU_CNN import *
from models.model_GRU_autoregressive import *
from models.model_LSTM_CNN import *
from utils.tokenizer import *
| 34.333333
| 45
| 0.830097
| 30
| 206
| 5.5
| 0.433333
| 0.30303
| 0.290909
| 0.381818
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11165
| 206
| 6
| 46
| 34.333333
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
89af9182006da451f2c059994b31a8e846d54782
| 5,893
|
py
|
Python
|
Trees_and_Graphs/Graph.py
|
RoKu1/cracking-the-coding-interview
|
ce2fabba75f1edf69b81a80022eb9ebac8a09af2
|
[
"Apache-2.0"
] | null | null | null |
Trees_and_Graphs/Graph.py
|
RoKu1/cracking-the-coding-interview
|
ce2fabba75f1edf69b81a80022eb9ebac8a09af2
|
[
"Apache-2.0"
] | null | null | null |
Trees_and_Graphs/Graph.py
|
RoKu1/cracking-the-coding-interview
|
ce2fabba75f1edf69b81a80022eb9ebac8a09af2
|
[
"Apache-2.0"
] | null | null | null |
from Stacks_and_Queues import Stack_and_Queue as SQ
class Node:
    """A named graph vertex carrying its own adjacency list."""

    def __init__(self, data):
        # The supplied value becomes the node's display name; each node
        # starts with an empty, per-instance neighbour list.
        self.name = data
        self.neighbours = []
class Graph:
    """Undirected graph built from a list of (name, name) edge pairs.

    ``maper`` maps node name -> Node object, ``visited`` accumulates nodes
    seen by DFS, and ``start`` (first node created) is the BFS root.
    """

    def __init__(self, arr):
        # arr: iterable of 2-element pairs; each pair is an undirected edge.
        self.visited = []
        self.maper = dict()
        if not arr:
            return
        # Seed the graph with the first pair; the loop below sees this pair
        # again, but the membership checks prevent duplicate edges.
        self.start = Node(arr[0][0])
        node = Node(arr[0][1])
        self.start.neighbours.append(node)
        node.neighbours.append(self.start)
        self.maper[str(self.start.name)] = self.start
        self.maper[str(node.name)] = node
        for pair in arr:
            if self.maper.get(pair[0]) and self.maper.get(pair[1]):
                # Both endpoints already known: connect them (at most once).
                node1 = self.maper[pair[0]]
                node2 = self.maper[pair[1]]
                if node1 not in node2.neighbours:
                    node1.neighbours.append(node2)
                    node2.neighbours.append(node1)
            elif self.maper.get(pair[0]) or self.maper.get(pair[1]):
                # Exactly one endpoint known: create the missing node, then
                # link both ways and register the new node. The two original
                # branches were mirror copies; merged here.
                if self.maper.get(pair[0]):
                    node1 = Node(pair[1])
                    node2 = self.maper[pair[0]]
                else:
                    node1 = Node(pair[0])
                    node2 = self.maper[pair[1]]
                if node1 not in node2.neighbours:
                    node2.neighbours.append(node1)
                    node1.neighbours.append(node2)
                    self.maper[node1.name] = node1
            elif not self.maper.get(pair[0]) and not self.maper.get(pair[1]):
                # Neither endpoint known: create and connect both.
                node1 = Node(pair[0])
                node2 = Node(pair[1])
                node1.neighbours.append(node2)
                node2.neighbours.append(node1)
                # NOTE(review): keys are str()-wrapped here but stored raw in
                # the branch above — for non-string names lookups could miss;
                # confirm node names are always strings.
                self.maper[str(node1.name)] = node1
                self.maper[str(node2.name)] = node2

    def __str__(self):
        """Render the adjacency list, one 'name--> n1, n2, ' line per node."""
        reprs = ""
        for name in self.maper.keys():
            reprs = reprs + name + "--> "
            for child in self.maper[name].neighbours:
                reprs = reprs + child.name + ", "
            reprs = reprs + "\n"
        return reprs

    def DFS(self, start):
        """Recursive depth-first walk; visited nodes accumulate in self.visited.

        NOTE(review): visited is never cleared, so a second DFS call on the
        same instance sees stale entries — clear it before reuse if needed.
        """
        self.visited.append(start)
        for child in start.neighbours:
            if child not in self.visited:
                self.DFS(child)

    def BFS(self):
        """Breadth-first walk from self.start, printing each node name.

        Bug fix: the original version never tracked visited nodes. Since every
        undirected edge is stored reciprocally, each neighbour re-enqueued its
        parent and the loop never terminated on any graph with an edge. Mirror
        GraphDirected.BFS: skip nodes already visited or already queued.
        """
        self.visited.clear()
        PriQ = SQ.Queue()
        PriQ.enque(self.start)
        while not PriQ.is_empty():
            currentnode = PriQ.deque()
            print(currentnode.name)
            self.visited.append(currentnode)
            for child in currentnode.neighbours:
                if child not in self.visited and child not in PriQ.items:
                    PriQ.enque(child)
        return
class GraphDirected:
    """Directed graph built from (source, target) name pairs.

    Same construction scheme as Graph, but edges are one-way: only
    node1.neighbours gains node2.
    """

    def __init__(self, arr):
        # arr: iterable of 2-element pairs; pair[0] -> pair[1] directed edge.
        self.maper = dict()
        self.visited = []
        if not arr:
            return
        # Seed with the first pair; the loop re-sees it, but the membership
        # checks prevent a duplicate edge.
        self.start = Node(arr[0][0])
        node = Node(arr[0][1])
        self.start.neighbours.append(node)
        self.maper[str(self.start.name)] = self.start
        self.maper[str(node.name)] = node
        for pair in arr:
            if self.maper.get(pair[0]) and self.maper.get(pair[1]):
                # Both endpoints already known: add the edge if missing.
                # print("in IF")
                node1 = self.maper[pair[0]]
                node2 = self.maper[pair[1]]
                if node2 not in node1.neighbours:
                    node1.neighbours.append(node2)
            elif self.maper.get(pair[0]) or self.maper.get(pair[1]):
                # Exactly one endpoint known: create the other, link one way.
                # print("in ELIF 1")
                if self.maper.get(pair[0]):
                    # print("in ELIF 1 IF")
                    node1 = Node(pair[1])
                    node2 = self.maper[pair[0]]
                    if node1 not in node2.neighbours:
                        node2.neighbours.append(node1)
                        self.maper[node1.name] = node1
                else:
                    # print("in ELIF 1 ELSE")
                    node1 = Node(pair[0])
                    node2 = self.maper[pair[1]]
                    if node2 not in node1.neighbours:
                        node1.neighbours.append(node2)
                        self.maper[node1.name] = node1
            elif not self.maper.get(pair[0]) and not self.maper.get(pair[1]):
                # Neither endpoint known: create both, link one way.
                # print("in ELIF 2")
                node1 = Node(pair[0])
                node2 = Node(pair[1])
                if node2 not in node1.neighbours:
                    node1.neighbours.append(node2)
                # NOTE(review): keys str()-wrapped here but stored raw in the
                # branch above — confirm node names are always strings.
                self.maper[str(node1.name)] = node1
                self.maper[str(node2.name)] = node2

    def __str__(self):
        # Render adjacency as "name--> n1, n2, \n" per node.
        reprs = ""
        for name in self.maper.keys():
            reprs = reprs + name + "--> "
            for child in self.maper[name].neighbours:
                reprs = reprs + child.name + ", "
            reprs = reprs + "\n"
        return reprs

    def DFS(self, start):
        # Recursive depth-first walk; results accumulate in self.visited.
        # NOTE(review): unlike BFS, visited is not cleared here — a second
        # DFS call on the same instance sees stale entries.
        self.visited.append(start)
        for child in start.neighbours:
            if child not in self.visited:
                self.DFS(child)

    def BFS(self):
        # Iterative breadth-first walk from self.start; prints names
        # space-separated on one line.
        self.visited.clear()
        PriQ = SQ.Queue()
        PriQ.enque(self.start)
        while not PriQ.is_empty():
            currentnode = PriQ.deque()
            print(currentnode.name, end=" ")
            self.visited.append(currentnode)
            for child in currentnode.neighbours:
                '''
                We need to ensure that the node that we put in queue is NOT visited and also not Present in Queue
                '''
                if child not in self.visited and child not in PriQ.items:
                    # print(child.name)
                    PriQ.enque(child)
        return
| 35.932927
| 114
| 0.486679
| 674
| 5,893
| 4.216617
| 0.099407
| 0.126671
| 0.059113
| 0.078818
| 0.859958
| 0.822308
| 0.814215
| 0.813863
| 0.77727
| 0.77727
| 0
| 0.033475
| 0.401833
| 5,893
| 163
| 115
| 36.153374
| 0.772766
| 0.04395
| 0
| 0.9
| 0
| 0
| 0.0031
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069231
| false
| 0
| 0.007692
| 0
| 0.146154
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f288ac6419f413dc554bf0acc619bad182fbcf4
| 119
|
py
|
Python
|
climate/__init__.py
|
FidelElie/cliMate
|
3d1c9cb33ef180ef07c3f9f6b27f9a6b40f62f12
|
[
"MIT"
] | null | null | null |
climate/__init__.py
|
FidelElie/cliMate
|
3d1c9cb33ef180ef07c3f9f6b27f9a6b40f62f12
|
[
"MIT"
] | null | null | null |
climate/__init__.py
|
FidelElie/cliMate
|
3d1c9cb33ef180ef07c3f9f6b27f9a6b40f62f12
|
[
"MIT"
] | null | null | null |
from climate.lib.inquirers import prompt
from .climate import CliMate
from .lib import utilities
from .lib import data
| 23.8
| 40
| 0.823529
| 18
| 119
| 5.444444
| 0.444444
| 0.22449
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 119
| 4
| 41
| 29.75
| 0.951456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7f9a80c58415f8e58251d64d2e8dee3f1de7ad95
| 96
|
py
|
Python
|
python/src/test/resources/pyfunc/numpy_array_test.py
|
maropu/lljvm-translator
|
322fbe24a27976948c8e8081a9552152dda58b4b
|
[
"Apache-2.0"
] | 70
|
2017-12-12T10:54:00.000Z
|
2022-03-22T07:45:19.000Z
|
python/src/test/resources/pyfunc/numpy_array_test.py
|
maropu/lljvm-as
|
322fbe24a27976948c8e8081a9552152dda58b4b
|
[
"Apache-2.0"
] | 14
|
2018-02-28T01:29:46.000Z
|
2019-12-10T01:42:22.000Z
|
python/src/test/resources/pyfunc/numpy_array_test.py
|
maropu/lljvm-as
|
322fbe24a27976948c8e8081a9552152dda58b4b
|
[
"Apache-2.0"
] | 4
|
2019-07-21T07:58:25.000Z
|
2021-02-01T09:46:59.000Z
|
import numpy as np
def numpy_array_test():
    """Return a fixed 2x3 int32 NumPy array used as a test fixture."""
    values = [[1, 2, 3], [4, 5, 6]]
    return np.array(values, dtype=np.int32)
| 19.2
| 51
| 0.625
| 19
| 96
| 3.052632
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101266
| 0.177083
| 96
| 4
| 52
| 24
| 0.632911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
f68f8eaf7f7437e37c858982cde9d89e2623cf17
| 1,074
|
py
|
Python
|
FlaskAPI/data_input.py
|
Jawwad-Fida/Salary-Estimator
|
f04612fd43a9549f3275cbd2013fa7dab5b18171
|
[
"MIT"
] | 1
|
2021-11-07T18:04:43.000Z
|
2021-11-07T18:04:43.000Z
|
FlaskAPI/data_input.py
|
Jawwad-Fida/Data-Science-Salary-Estimator
|
f04612fd43a9549f3275cbd2013fa7dab5b18171
|
[
"MIT"
] | null | null | null |
FlaskAPI/data_input.py
|
Jawwad-Fida/Data-Science-Salary-Estimator
|
f04612fd43a9549f3275cbd2013fa7dab5b18171
|
[
"MIT"
] | null | null | null |
# Canned input row for the salary-prediction endpoint. The leading entries
# look like numeric features (3.0, 1.0, ..., 6.0, ..., 3280.0) and the long
# 0.0/1.0 tail is presumably a one-hot encoding of categorical columns —
# TODO confirm against the model's training column order.
data_in = [3.0,
           1.0,
           0.0,
           0.0,
           1.0,
           6.0,
           1.0,
           0.0,
           1.0,
           0.0,
           3280.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           0.0,
           1.0,
           0.0,
           0.0,
           0.0,
           1.0]
| 6.067797
| 15
| 0.33892
| 356
| 1,074
| 1.019663
| 0.019663
| 1.785124
| 2.586777
| 3.327824
| 0.966942
| 0.961433
| 0.961433
| 0.917355
| 0.917355
| 0.911846
| 0
| 0.495833
| 0.329609
| 1,074
| 177
| 16
| 6.067797
| 0.008333
| 0
| 0
| 0.977401
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
f6a6498e81fd7aeb698c9e97d5cf8205a7c0cfe2
| 1,670
|
py
|
Python
|
tools/migrations/0005_auto_20200306_1448.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 4
|
2019-03-28T06:42:17.000Z
|
2021-06-06T13:10:51.000Z
|
tools/migrations/0005_auto_20200306_1448.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 177
|
2018-09-28T14:21:56.000Z
|
2022-03-30T21:45:26.000Z
|
tools/migrations/0005_auto_20200306_1448.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 8
|
2018-10-25T20:43:10.000Z
|
2022-03-17T14:19:27.000Z
|
# Generated by Django 2.2.9 on 2020-03-06 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add listing_description plus en/es/fr/pt variants to ToolSubPage."""

    dependencies = [
        ('tools', '0004_toolsubpage'),
    ]

    # The base field is non-nullable; each per-language shadow field also
    # allows NULL. All five share the same help text.
    operations = [
        migrations.AddField(
            model_name='toolsubpage',
            name='listing_description',
            field=models.CharField(
                blank=True,
                help_text='Optional: short description to appear on the '
                          'listing page if this tool is featured',
                max_length=255,
            ),
        ),
    ] + [
        migrations.AddField(
            model_name='toolsubpage',
            name='listing_description' + suffix,
            field=models.CharField(
                blank=True,
                help_text='Optional: short description to appear on the '
                          'listing page if this tool is featured',
                max_length=255,
                null=True,
            ),
        )
        for suffix in ('_en', '_es', '_fr', '_pt')
    ]
| 42.820513
| 170
| 0.654491
| 199
| 1,670
| 5.366834
| 0.266332
| 0.08427
| 0.107678
| 0.126404
| 0.858614
| 0.858614
| 0.858614
| 0.858614
| 0.746255
| 0.746255
| 0
| 0.027157
| 0.250299
| 1,670
| 38
| 171
| 43.947368
| 0.825879
| 0.026946
| 0
| 0.59375
| 1
| 0
| 0.365373
| 0.054221
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f6dcdcdcae32180dd43739b5dbfb4a35c58419d0
| 434
|
py
|
Python
|
graph/graph.py
|
RafaelKuebler/GraphAlgos
|
b6cbb45a1f6f40056a441aac5e24ee840973c01d
|
[
"MIT"
] | 1
|
2021-02-14T08:47:17.000Z
|
2021-02-14T08:47:17.000Z
|
graph/graph.py
|
RafaelKuebler/GraphAlgos
|
b6cbb45a1f6f40056a441aac5e24ee840973c01d
|
[
"MIT"
] | null | null | null |
graph/graph.py
|
RafaelKuebler/GraphAlgos
|
b6cbb45a1f6f40056a441aac5e24ee840973c01d
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Graph(ABC):
    """Abstract interface for graph structures with obstacle support."""

    @abstractmethod
    def add_edge_to_neighbors(self, node):
        """Create edges between *node* and its neighbours — TODO confirm exact semantics in concrete subclasses."""

    @abstractmethod
    def remove_edge(self, node1, node2):
        """Remove the edge connecting *node1* and *node2*."""

    @abstractmethod
    def mark_as_obstacle(self, node):
        """Flag *node* as an obstacle."""

    @abstractmethod
    def is_obstacle(self, node):
        """Report whether *node* is an obstacle; base implementation says no."""
        return False

    @abstractmethod
    def get_connected_nodes(self, node):
        """Return nodes connected to *node*; base implementation yields none."""
        return []
| 18.083333
| 42
| 0.647465
| 49
| 434
| 5.55102
| 0.530612
| 0.3125
| 0.231618
| 0.191176
| 0.213235
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00639
| 0.278802
| 434
| 23
| 43
| 18.869565
| 0.86262
| 0
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.176471
| 0.058824
| 0.117647
| 0.529412
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
101a878d25ad6dc46ef79630ce3f72370ef527e8
| 11,912
|
py
|
Python
|
app/models/product.py
|
leahokamura/RetailTherapy
|
18e1070369beecdf69c1c4b4707286acc3f9b163
|
[
"MIT"
] | null | null | null |
app/models/product.py
|
leahokamura/RetailTherapy
|
18e1070369beecdf69c1c4b4707286acc3f9b163
|
[
"MIT"
] | null | null | null |
app/models/product.py
|
leahokamura/RetailTherapy
|
18e1070369beecdf69c1c4b4707286acc3f9b163
|
[
"MIT"
] | null | null | null |
from __future__ import print_function # In python 2.7
from flask import current_app as app
from sqlalchemy import text
import sys
import sqlalchemy
# product class
class Product:
    """Read-side model for rows of the Products table.

    Instances are plain value holders for one row; every accessor is a
    @staticmethod that runs a query through ``app.db.execute``. The sort
    and filter criteria accepted by the search helpers are closed sets of
    fixed strings, mapped to SQL fragments by the class-level tables below.
    """

    # ORDER BY fragment per supported sortCriteria value. The fallback
    # '(SELECT NULL)' is a no-op ordering used when no criteria matches.
    _SORT_CLAUSES = {
        'high': 'price DESC',
        'low': 'price ASC',
        'high_rating': 'rating DESC NULLS LAST',
        'low_rating': 'rating ASC NULLS LAST',
    }

    # Extra WHERE predicate per supported filterCriteria value. These are
    # fixed literals chosen by this class (never user input), so splicing
    # them into the SQL text is not an injection vector.
    _FILTER_CLAUSES = {
        # filtering by price
        'under25': 'AND (Products.price >= 0 AND Products.price < 25)',
        '25to50': 'AND (Products.price >= 25 AND Products.price < 50)',
        '50to100': 'AND (Products.price >= 50 AND Products.price < 100)',
        '100to200': 'AND (Products.price >= 100 AND Products.price < 200)',
        '200&Up': 'AND Products.price >= 200',
        # filtering by rating
        '1&Up': 'AND prod_rating.avg >= 1',
        '2&Up': 'AND prod_rating.avg >= 2',
        '3&Up': 'AND prod_rating.avg >= 3',
        '4&Up': 'AND prod_rating.avg >= 4',
    }

    def __init__(self, pid, name, price, available, image, description, category):
        self.pid = pid
        self.name = name
        self.price = price
        self.available = available
        self.image = image
        self.description = description
        self.category = category

    @staticmethod
    def _sorting_descrip(sortCriteria):
        """Return the ORDER BY fragment for *sortCriteria* (no-op ordering if unknown)."""
        return Product._SORT_CLAUSES.get(sortCriteria, '(SELECT NULL)')

    @staticmethod
    def _filtering_descrip(filterCriteria):
        """Return the extra WHERE predicate for *filterCriteria* ('' if unknown)."""
        return Product._FILTER_CLAUSES.get(filterCriteria, '')

    @staticmethod
    def get(pid):
        """Fetch a single Product by product id.

        NOTE(review): an empty result set is probably ``[]`` rather than
        ``None``, in which case ``rows[0]`` would raise IndexError before
        the ``is not None`` guard helps -- confirm against the db wrapper;
        behaviour kept as in the original.
        """
        rows = app.db.execute('''
            SELECT pid, name, price, available, image, description, category
            FROM Products
            WHERE pid = :pid
            ''',
            pid=pid)
        return Product(*(rows[0])) if rows is not None else None

    @staticmethod
    def get_all(available=True):
        """List every product whose availability flag matches *available*."""
        rows = app.db.execute('''
            SELECT pid, name, price, available, image, description, category
            FROM Products
            WHERE available = :available
            ''',
            available=available)
        return [Product(*row) for row in rows]

    @staticmethod
    def get_top(available=True):
        """Return up to 12 products ranked by average rating, then review count."""
        rows = app.db.execute('''
            SELECT pid, name, price, available, image, description, category
            FROM (
            WITH prod_rating AS (
            SELECT pid, AVG(rating)::numeric(10,2) AS avg, COUNT(pid) AS count
            FROM Product_Reviews
            GROUP BY pid)
            SELECT Products.pid, name, price, available, image, description, category, prod_stats.avg AS rating, prod_stats.count AS count
            FROM Products
            RIGHT JOIN
            (SELECT pid, avg, count
            FROM prod_rating
            WHERE count > 0) AS prod_stats
            ON prod_stats.pid = Products.pid
            WHERE available = :available
            ORDER BY rating DESC, count DESC
            LIMIT 12 ) AS foo
            ''',
            available=available)
        return [Product(*row) for row in rows]

    @staticmethod
    def get_name(pid):
        """Return the (pid, name, image) row for *pid*, or None when absent."""
        rows = app.db.execute('''
            SELECT pid, name, image
            FROM Products
            WHERE pid = :pid
            ''',
            pid=pid)
        return (rows[0]) if rows else None

    @staticmethod
    def get_categories():
        """Return the distinct categories products may belong to, or None."""
        rows = app.db.execute('''
            SELECT DISTINCT category FROM products
            ''')
        return [(row[0]) for row in rows] if rows else None

    @staticmethod
    def get_prod_by_cat(category, sortCriteria, filterCriteria, number):
        """Return one page (up to 9 rows) of products in *category*.

        *number* is the OFFSET of the page; sorting/filtering fragments come
        from the class-level tables (fixed literals, not user input, so no
        SQL-injection risk from the string concatenation). Returns the raw
        rows, or None when the page is empty.
        """
        rows = app.db.execute('''
            WITH prod_rating AS (
            SELECT pid, AVG(rating)::numeric(10,2) AS avg
            FROM Product_Reviews
            GROUP BY pid)
            SELECT Products.pid, Products.name, Products.price, Products.available, Products.image, prod_rating.avg AS rating
            FROM Products
            FULL OUTER JOIN
            prod_rating
            ON prod_rating.pid = Products.pid
            WHERE Products.category = :category
            ''' + Product._filtering_descrip(filterCriteria) +
            '''ORDER BY ''' + Product._sorting_descrip(sortCriteria) +
            ''' LIMIT 9
            OFFSET :number
            ''',
            category=category, number=number)
        return rows if rows else None

    @staticmethod
    def get_by_keyword(words, sortCriteria, filterCriteria, number):
        """Return one page (up to 9 rows) of products matching *words*.

        *words* is bound as a parameter (LIKE ANY), so user input never
        reaches the SQL text; only the fixed sort/filter fragments are
        concatenated. Returns the raw rows, or None when the page is empty.
        """
        rows = app.db.execute('''
            WITH prod_rating AS (
            SELECT pid, AVG(rating)::numeric(10,2) AS avg
            FROM Product_Reviews
            GROUP BY pid)
            SELECT Products.pid, Products.name, Products.available, Products.price, Products.image, prod_rating.avg AS rating
            FROM Products
            FULL OUTER JOIN
            prod_rating
            ON prod_rating.pid = Products.pid
            WHERE (name LIKE ANY (:words)
            OR description LIKE ANY (:words)
            ) ''' + Product._filtering_descrip(filterCriteria) +
            '''ORDER BY ''' + Product._sorting_descrip(sortCriteria) +
            ''' LIMIT 9
            OFFSET :number
            ''',
            words=words, number=number)
        return rows if rows else None

    @staticmethod
    def get_total_prod_by_cat(category, sortCriteria, filterCriteria):
        """Count products in *category* matching *filterCriteria*.

        NOTE(review): the rating filters ('1&Up'..'4&Up') reference
        prod_rating, which this query does not define -- they would fail
        here exactly as in the original; confirm whether rating filters
        are ever combined with total counts.
        """
        rows = app.db.execute('''
            SELECT Products.pid, Products.name, Products.price, Products.image
            FROM Products
            WHERE Products.category = :category
            ''' + Product._filtering_descrip(filterCriteria) +
            '''ORDER BY ''' + Product._sorting_descrip(sortCriteria),
            category=category)
        return len(rows)

    @staticmethod
    def get_total_by_keyword(words, sortCriteria, filterCriteria):
        """Count products matching *words* and *filterCriteria*.

        NOTE(review): same prod_rating caveat as get_total_prod_by_cat.
        """
        rows = app.db.execute('''
            SELECT Products.pid, Products.name, Products.price, Products.image
            FROM Products
            WHERE (name LIKE ANY (:words)
            OR description LIKE ANY (:words)
            ) ''' + Product._filtering_descrip(filterCriteria) +
            '''ORDER BY ''' + Product._sorting_descrip(sortCriteria),
            words=words)
        return len(rows)

    @staticmethod
    def get_sellers(pid):
        """Return (seller_id, in_stock) rows for *pid*, or None when none exist."""
        rows = app.db.execute('''
            SELECT seller_id, in_stock
            FROM Inventory
            WHERE pid = :pid
            ''',
            pid=pid)
        return rows if rows else None
| 37.696203
| 126
| 0.612743
| 1,358
| 11,912
| 5.273196
| 0.103829
| 0.09831
| 0.095517
| 0.075408
| 0.874599
| 0.846251
| 0.819997
| 0.792068
| 0.771401
| 0.754224
| 0
| 0.024676
| 0.268553
| 11,912
| 316
| 127
| 37.696203
| 0.7972
| 0.10863
| 0
| 0.798387
| 0
| 0.016129
| 0.358078
| 0.007452
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044355
| false
| 0
| 0.020161
| 0
| 0.108871
| 0.004032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63fcdca6ab3c9055d281a3ba8db117c1d71e9653
| 5,536
|
py
|
Python
|
pymatflow/cmd/qe_parser.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/cmd/qe_parser.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/cmd/qe_parser.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
def _parse_namelist(lines, start):
    """Collect key/value settings from one Fortran-namelist block.

    ``lines[start]`` is the line holding the ``&NAME`` header; parsing runs
    until the terminating ``/`` line. Inline ``#`` comments are stripped.
    A line with exactly one ``=`` maps its key to the first value token;
    otherwise the key maps to the full token list after the first ``=``
    (the same convention every parser below used).
    """
    params = {}
    j = 1
    while True:
        tokens = lines[start + j].split()
        if not tokens:
            # Skip blank lines. The original 'if len(...) == 0: pass' fell
            # straight through into the key/value code and crashed.
            j += 1
            continue
        if tokens[0] == "/":
            break
        body = lines[start + j].split("\n")[0].split("#")[0]
        parts = body.split("=")
        key = parts[0].split()[0]
        if len(parts) == 2:
            # single-value variable
            params[key] = parts[1].split()[0]
        else:
            params[key] = parts[1].split()
        j += 1
    return params


def read_pwscf_in(filepath):
    """Read parameters from a pwscf (pw.x) input template.

    Returns the (control, system, electrons, ions, cell) namelists as
    dicts (empty for namelists absent from the file). Fixes the original
    NameError where single-value &control entries were stored into a
    misspelled 'contorl' dict, and no longer crashes on blank lines.
    """
    with open(filepath, 'r') as fin:
        lines = fin.readlines()
    sections = {
        "&control": {},
        "&system": {},
        "&electrons": {},
        "&ions": {},
        "&cell": {},
    }
    for i in range(len(lines)):
        tokens = lines[i].split()
        if tokens and tokens[0].lower() in sections:
            sections[tokens[0].lower()] = _parse_namelist(lines, i)
    return (sections["&control"], sections["&system"], sections["&electrons"],
            sections["&ions"], sections["&cell"])


def read_neb_in(filepath):
    """Read the &PATH namelist from a neb.x input template; returns a dict."""
    with open(filepath, 'r') as fin:
        lines = fin.readlines()
    path = {}
    for i in range(len(lines)):
        tokens = lines[i].split()
        if tokens and tokens[0].lower() == "&path":
            path = _parse_namelist(lines, i)
    return path


def read_ph_in(filepath):
    """Read the &INPUTPH namelist from a ph.x input template; returns a dict."""
    with open(filepath, 'r') as fin:
        lines = fin.readlines()
    ph = {}
    for i in range(len(lines)):
        tokens = lines[i].split()
        if tokens and tokens[0].lower() == "&inputph":
            ph = _parse_namelist(lines, i)
    return ph
| 46.133333
| 134
| 0.395592
| 688
| 5,536
| 3.174419
| 0.071221
| 0.211538
| 0.157051
| 0.269231
| 0.940476
| 0.913004
| 0.898352
| 0.898352
| 0.898352
| 0.898352
| 0
| 0.037666
| 0.362175
| 5,536
| 120
| 135
| 46.133333
| 0.580855
| 0.078577
| 0
| 0.630435
| 0
| 0
| 0.031643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032609
| false
| 0.076087
| 0
| 0
| 0.065217
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
126ac4efe83550ea3847e28918c6a06ffc47aadd
| 116
|
py
|
Python
|
pwnlib/encoders/i386/__init__.py
|
DrKeineLust/pwntools
|
415f11bba7096b7d68fe144f5b3432b9c12a4f0a
|
[
"MIT"
] | 7
|
2017-07-11T01:12:02.000Z
|
2017-09-21T23:39:54.000Z
|
pwnlib/encoders/i386/__init__.py
|
DrKeineLust/pwntools
|
415f11bba7096b7d68fe144f5b3432b9c12a4f0a
|
[
"MIT"
] | null | null | null |
pwnlib/encoders/i386/__init__.py
|
DrKeineLust/pwntools
|
415f11bba7096b7d68fe144f5b3432b9c12a4f0a
|
[
"MIT"
] | 3
|
2018-03-21T11:48:05.000Z
|
2021-10-16T15:38:01.000Z
|
from __future__ import absolute_import
from pwnlib.encoders.i386 import delta
from pwnlib.encoders.i386 import xor
| 23.2
| 38
| 0.853448
| 17
| 116
| 5.529412
| 0.529412
| 0.212766
| 0.382979
| 0.468085
| 0.595745
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058252
| 0.112069
| 116
| 4
| 39
| 29
| 0.854369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
89e0cd4c6889956ba68aedfd8643968cac46da8a
| 149
|
py
|
Python
|
address/main.py
|
libterty/service-evaluate
|
f609e40cd0692191d460b0e1307bd2c6983f7f8c
|
[
"MIT"
] | 1
|
2021-06-03T14:46:35.000Z
|
2021-06-03T14:46:35.000Z
|
address/main.py
|
libterty/service-evaluate
|
f609e40cd0692191d460b0e1307bd2c6983f7f8c
|
[
"MIT"
] | null | null | null |
address/main.py
|
libterty/service-evaluate
|
f609e40cd0692191d460b0e1307bd2c6983f7f8c
|
[
"MIT"
] | null | null | null |
import sys
import twaddress
def main():
    """Look up the address passed as the first CLI argument; print and return it.

    Fetches once and reuses the result: the original called
    ``twaddress.get(sys.argv[1])`` twice, repeating the lookup work
    (and any side effects) on every run.
    """
    result = twaddress.get(sys.argv[1])
    print(result)
    return result
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 16.555556
| 35
| 0.697987
| 22
| 149
| 4.363636
| 0.545455
| 0.25
| 0.3125
| 0.395833
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.14094
| 149
| 9
| 36
| 16.555556
| 0.734375
| 0
| 0
| 0
| 0
| 0
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.285714
| 0
| 0.571429
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
89f447150eb30d25e77ee09afd76de7cb6a81c5a
| 12,272
|
py
|
Python
|
nanome_rmsd/rmsd_selection.py
|
nanome-ai/plugin-rmsd
|
f95b04c6861aa4b24368d35cda5066671c2dd3e6
|
[
"MIT"
] | null | null | null |
nanome_rmsd/rmsd_selection.py
|
nanome-ai/plugin-rmsd
|
f95b04c6861aa4b24368d35cda5066671c2dd3e6
|
[
"MIT"
] | null | null | null |
nanome_rmsd/rmsd_selection.py
|
nanome-ai/plugin-rmsd
|
f95b04c6861aa4b24368d35cda5066671c2dd3e6
|
[
"MIT"
] | null | null | null |
import numpy as np
from nanome.util import Logs
# needleman wunsch algorithm
# the param only_score was used for clustalW
def global_align(complex1, complex2, gap_penalty=-1, mismatch_penalty=0, match_reward=3, only_score=False):
    """Needleman-Wunsch global alignment of the selected residues of two complexes.

    Builds the full (m+1)x(n+1) scoring table over the residue-type
    sequences, then traces back from the bottom-right cell. Residues that
    land on a mismatch or gap get their atoms deselected, unless
    ``only_score`` is True, in which case selection state is left alone.

    Returns ``1 - match_count / min(m, n)`` (0.0 for a perfect match),
    or 0 when either complex has no fully selected residue.
    """
    match_count = 0
    clustal_w_score = 0  # accumulated alignment score (used for ClustalW-style scoring)
    selected_res1 = selected_res(complex1)
    selected_res2 = selected_res(complex2)
    # list of residues type of the complex
    rest_types1 = list(map(lambda res: res.type, selected_res1))
    rest_types2 = list(map(lambda res: res.type, selected_res2))
    # run the "smart occupancy selection method" on the residue lists of both complexes
    res_list1 = list(map(lambda a: select_occupancy(a), selected_res1))
    res_list2 = list(map(lambda a: select_occupancy(a), selected_res2))
    # create the table of global alignment
    m, n = len(rest_types1), len(rest_types2)
    shorter_len = min(m, n)
    score = np.zeros((m + 1, n + 1))
    # fill the first column and first row of the table with gap penalties
    for i in range(0, m + 1):
        score[i][0] = gap_penalty * i
    for j in range(0, n + 1):
        score[0][j] = gap_penalty * j
    # fill the table with scores
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if rest_types1[i - 1] == rest_types2[j - 1]:
                match = score[i - 1][j - 1] + match_reward
            else:
                match = score[i - 1][j - 1] + mismatch_penalty
            delete = score[i - 1][j] + gap_penalty
            insert = score[i][j - 1] + gap_penalty
            score[i][j] = max(match, delete, insert)
    # Traceback and compute the alignment
    # aligns are the output sequences with gaps (both delete and insert)
    # finals are the output sequences that should be the same after the sequence alignment
    align1, align2 = '', ''
    final1, final2 = '', ''
    # start from the bottom right cell
    i, j = m, n
    # go left and up until touching the 1st row/column
    while i > 0 and j > 0:
        score_current = score[i][j]
        score_diagonal = score[i - 1][j - 1]
        score_up = score[i][j - 1]
        score_left = score[i - 1][j]
        # two residues match; only deselect when the selected atoms don't match (problem in the pdb file)
        if score_current == score_diagonal + match_reward and \
                rest_types1[i - 1] == rest_types2[j - 1] and rest_types1[i - 1] != 'UNK' and rest_types2[j - 1] != 'UNK':
            match1 = list(map(lambda a: a.selected, res_list1[i - 1].atoms))
            match2 = list(map(lambda a: a.selected, res_list2[j - 1].atoms))
            if match1 != match2 and not only_score:
                for x in res_list1[i - 1].atoms:
                    x.selected = False
                for x in res_list2[j - 1].atoms:
                    x.selected = False
            else:
                align1 += rest_types1[i - 1]
                align2 += rest_types2[j - 1]
                final1 += rest_types1[i - 1]
                final2 += rest_types2[j - 1]
                clustal_w_score += match_reward
                match_count += 1
            i -= 1
            j -= 1
        # two of the residues do not match, deselect both
        # NOTE(review): 'A and B or C' parses as '(A and B) or C' -- if the
        # intent was 'A and (B or C)', parentheses are missing; confirm.
        elif score_current == score_diagonal + mismatch_penalty and \
                rest_types1[i - 1] != rest_types2[j - 1] or (rest_types1[i - 1] == 'UNK' and rest_types2[j - 1] == 'UNK'):
            if not only_score:
                for x in res_list1[i - 1].atoms:
                    x.selected = False
                for y in res_list2[j - 1].atoms:
                    y.selected = False
            clustal_w_score += mismatch_penalty
            i -= 1
            j -= 1
        # rest_types1 has an extra residue, deselect it
        elif score_current == score_left + gap_penalty:
            align1 += rest_types1[i - 1]
            align2 += '---'
            if not only_score:
                for x in res_list1[i - 1].atoms:
                    x.selected = False
            clustal_w_score += gap_penalty
            i -= 1
        # rest_types2 has an extra residue, deselect it
        elif score_current == score_up + gap_penalty:
            align1 += '---'
            align2 += rest_types2[j - 1]
            if not only_score:
                for x in res_list2[j - 1].atoms:
                    x.selected = False
            clustal_w_score += gap_penalty
            j -= 1
    # Finish tracing up to the top left cell
    while i > 0:
        align1 += rest_types1[i - 1]
        align2 += '---'
        if not only_score:
            for x in res_list1[i - 1].atoms:
                x.selected = False
        clustal_w_score += gap_penalty
        i -= 1
    while j > 0:
        align1 += '---'
        align2 += rest_types2[j - 1]
        if not only_score:
            for x in res_list2[j - 1].atoms:
                x.selected = False
        clustal_w_score += gap_penalty
        j -= 1
    if shorter_len != 0:
        rt = 1 - (match_count / shorter_len)
    else:
        rt = 0
        Logs.debug("one of the complexes has no atom selected")
    return rt
def local_align(complex1, complex2, gap_penalty=-2, mismatch_penalty=-1, match_reward=3, only_score=False):
    """Smith-Waterman-style local alignment of the selected residues of two complexes.

    Like ``global_align`` but the table is floored at 0 and the traceback
    starts at the maximum-scoring cell instead of the bottom-right one;
    residues outside the local alignment window are deselected first.
    Atom deselection is skipped entirely when ``only_score`` is True.

    Returns ``1 - match_count / min(m, n)``, or 0 when either complex has
    no fully selected residue.
    """
    match_count = 0
    clustal_w_score = 0  # accumulated alignment score (used for ClustalW-style scoring)
    selected_res1 = selected_res(complex1)
    selected_res2 = selected_res(complex2)
    max_cell = [0, 0]       # coordinates of the best-scoring cell
    max_cell_value = 0      # value of the best-scoring cell
    # list of residues type of the complex
    rest_types1 = list(map(lambda res: res.type, selected_res1))
    rest_types2 = list(map(lambda res: res.type, selected_res2))
    # run the "smart occupancy selection method" on the residue lists of both complexes
    res_list1 = list(map(lambda a: select_occupancy(a), selected_res1))
    res_list2 = list(map(lambda a: select_occupancy(a), selected_res2))
    # create the table of the alignment
    m, n = len(rest_types1), len(rest_types2)
    shorter_len = min(m, n)
    score = np.zeros((m + 1, n + 1))
    # first column and first row stay 0 (local alignment boundary condition)
    for i in range(0, m + 1):
        score[i][0] = 0
    for j in range(0, n + 1):
        score[0][j] = 0
    # fill the table with scores, floored at 0, tracking the maximum cell
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if rest_types1[i - 1] == rest_types2[j - 1]:
                match = score[i - 1][j - 1] + match_reward
            else:
                match = score[i - 1][j - 1] + mismatch_penalty
            delete = score[i - 1][j] + gap_penalty
            insert = score[i][j - 1] + gap_penalty
            score[i][j] = max(match, delete, insert, 0)
            if score[i][j] > max_cell_value:
                max_cell_value = score[i][j]
                max_cell = [i, j]
    # Traceback and compute the alignment
    # aligns are the output sequences with gaps (both delete and insert)
    # finals are the output sequences that should be the same after the sequence alignment
    align1, align2 = '', ''
    final1, final2 = '', ''
    i, j = m, n
    # residues after the local-alignment window: deselect them
    while i > max_cell[0]:
        if not only_score:
            for x in res_list1[i - 1].atoms:
                x.selected = False
        clustal_w_score += gap_penalty
        i -= 1
    while j > max_cell[1]:
        if not only_score:
            for x in res_list2[j - 1].atoms:
                x.selected = False
        clustal_w_score += gap_penalty
        j -= 1
    # start the traceback from the best-scoring cell
    i, j = max_cell
    # go left and up until touching the 1st row/column
    while i > 0 and j > 0:
        score_current = score[i][j]
        score_diagonal = score[i - 1][j - 1]
        score_up = score[i][j - 1]
        score_left = score[i - 1][j]
        # a 0 cell ends the local alignment
        if score_current == 0:
            break
        # two residues match; only deselect when the selected atoms don't match (problem in the pdb file)
        if score_current == score_diagonal + match_reward and \
                rest_types1[i - 1] == rest_types2[j - 1] and rest_types1[i - 1] != 'UNK' and rest_types2[j - 1] != 'UNK':
            match1 = list(map(lambda a: a.selected, res_list1[i - 1].atoms))
            match2 = list(map(lambda a: a.selected, res_list2[j - 1].atoms))
            if match1 != match2 and not only_score:
                for x in res_list1[i - 1].atoms:
                    x.selected = False
                for x in res_list2[j - 1].atoms:
                    x.selected = False
            else:
                align1 += rest_types1[i - 1]
                align2 += rest_types2[j - 1]
                final1 += rest_types1[i - 1]
                final2 += rest_types2[j - 1]
                clustal_w_score += match_reward
                match_count += 1
            i -= 1
            j -= 1
        # two of the residues do not match, deselect both
        # NOTE(review): 'A and B or C' parses as '(A and B) or C' -- if the
        # intent was 'A and (B or C)', parentheses are missing; confirm.
        elif score_current == score_diagonal + mismatch_penalty and \
                rest_types1[i - 1] != rest_types2[j - 1] or (rest_types1[i - 1] == 'UNK' and rest_types2[j - 1] == 'UNK'):
            if not only_score:
                for x in res_list1[i - 1].atoms:
                    x.selected = False
                for y in res_list2[j - 1].atoms:
                    y.selected = False
            clustal_w_score += mismatch_penalty
            i -= 1
            j -= 1
        # rest_types1 has an extra residue, deselect it
        elif score_current == score_left + gap_penalty:
            align1 += rest_types1[i - 1]
            align2 += '---'
            if not only_score:
                for x in res_list1[i - 1].atoms:
                    x.selected = False
            clustal_w_score += gap_penalty
            i -= 1
        # rest_types2 has an extra residue, deselect it
        elif score_current == score_up + gap_penalty:
            align1 += '---'
            align2 += rest_types2[j - 1]
            if not only_score:
                for x in res_list2[j - 1].atoms:
                    x.selected = False
            clustal_w_score += gap_penalty
            j -= 1
    # Finish tracing up to the top left cell
    while i > 0:
        align1 += rest_types1[i - 1]
        align2 += '---'
        if not only_score:
            for x in res_list1[i - 1].atoms:
                x.selected = False
        clustal_w_score += gap_penalty
        i -= 1
    while j > 0:
        align1 += '---'
        align2 += rest_types2[j - 1]
        if not only_score:
            for x in res_list2[j - 1].atoms:
                x.selected = False
        clustal_w_score += gap_penalty
        j -= 1
    Logs.debug("final1 is ", final1)
    Logs.debug("final2 is ", final2)
    if shorter_len != 0:
        rt = 1 - (match_count / shorter_len)
    else:
        rt = 0
        Logs.debug("one of the complexes has no atom selected")
    return rt
# takes in a single residue
def select_occupancy(residue):
    """Resolve alternate-location atoms of *residue* by occupancy.

    Atoms with occupancy < 1 are grouped by atom name. Within each group,
    the occupancies are summed and rounded to decide how many alternates
    to keep; the highest-occupancy alternates stay selected and the rest
    get ``selected`` set to False. Returns the same residue object.
    """
    partial = {}
    for atom in residue.atoms:
        if atom._occupancy < 1:
            group = partial.setdefault(atom.name, ([], []))
            group[0].append(atom)
            group[1].append(atom._occupancy)
    for alternates, occupancies in partial.values():
        keep = round(sum(occupancies))
        alternates.sort(key=lambda alt: alt._occupancy, reverse=True)
        for surplus in alternates[keep:]:
            surplus.selected = False
    return residue
# select the residues whose atoms are all selected.
def selected_res(complexes):
    """Return the residues of *complexes* whose atoms are all selected.

    A residue is excluded when any atom's ``selected`` is the literal
    ``False`` (identity test, matching the original's ``is False``).
    Improvements over the original: drops the pointless identity
    ``map``/list copy and short-circuits on the first unselected atom
    instead of always scanning every atom of every residue.
    """
    fully_selected = []
    for residue in complexes.residues:
        if all(atom.selected is not False for atom in residue.atoms):
            fully_selected.append(residue)
    return fully_selected
| 35.163324
| 122
| 0.547995
| 1,710
| 12,272
| 3.777193
| 0.104678
| 0.015792
| 0.035764
| 0.039015
| 0.859576
| 0.843319
| 0.843319
| 0.843319
| 0.83434
| 0.83434
| 0
| 0.041049
| 0.35088
| 12,272
| 348
| 123
| 35.264368
| 0.769772
| 0.170958
| 0
| 0.808
| 0
| 0
| 0.014815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016
| false
| 0
| 0.008
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
89fbd775aa4f219a7e2a4e74c22f27caa9bdc144
| 226
|
py
|
Python
|
tests/test_version.py
|
hile/treesync
|
7b507cd3e01891ae5f24a2edf6aed125b14a1128
|
[
"PSF-2.0"
] | null | null | null |
tests/test_version.py
|
hile/treesync
|
7b507cd3e01891ae5f24a2edf6aed125b14a1128
|
[
"PSF-2.0"
] | null | null | null |
tests/test_version.py
|
hile/treesync
|
7b507cd3e01891ae5f24a2edf6aed125b14a1128
|
[
"PSF-2.0"
] | null | null | null |
from sys_toolkit.tests.packaging import validate_version_string
from treesync import __version__
def test_version_string():
    """Validate that treesync's __version__ is a well-formed packaging version."""
    validate_version_string(__version__)
| 18.833333
| 63
| 0.769912
| 27
| 226
| 5.888889
| 0.555556
| 0.327044
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172566
| 226
| 11
| 64
| 20.545455
| 0.850267
| 0.159292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d60798731a5561f1138ab70bd10f546a9ab85538
| 5,536
|
py
|
Python
|
apps/markets/migrations/0012_auto_20160728_1552.py
|
uktrade/enav-alpha
|
8d38f05763367ca6b6747203241f267612fd6e44
|
[
"MIT"
] | null | null | null |
apps/markets/migrations/0012_auto_20160728_1552.py
|
uktrade/enav-alpha
|
8d38f05763367ca6b6747203241f267612fd6e44
|
[
"MIT"
] | 67
|
2016-07-11T12:57:58.000Z
|
2016-08-08T12:59:19.000Z
|
apps/markets/migrations/0012_auto_20160728_1552.py
|
UKTradeInvestment/enav-alpha
|
8d38f05763367ca6b6747203241f267612fd6e44
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-28 15:52
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Add ordering to Country/Region and 29 rich-text 'misc' fields to Market."""

    dependencies = [
        ('markets', '0011_auto_20160726_0958'),
    ]

    # Two ordering changes, followed by 29 identical nullable rich-text
    # columns misc1..misc29 on Market. The AddField operations are generated
    # in alphabetical string order (misc1, misc10..misc19, misc2, misc20..),
    # exactly the order Django's autodetector originally emitted them in.
    operations = [
        migrations.AlterModelOptions(
            name='country',
            options={'ordering': ('-name',)},
        ),
        migrations.AlterModelOptions(
            name='region',
            options={'ordering': ('-name',)},
        ),
    ] + [
        migrations.AddField(
            model_name='market',
            name=misc_field,
            field=ckeditor.fields.RichTextField(blank=True, null=True),
        )
        for misc_field in sorted('misc%d' % n for n in range(1, 30))
    ]
| 32.564706
| 71
| 0.544436
| 488
| 5,536
| 6.10041
| 0.155738
| 0.141082
| 0.224051
| 0.263016
| 0.837756
| 0.837756
| 0.825328
| 0.825328
| 0.808868
| 0.808868
| 0
| 0.021892
| 0.331647
| 5,536
| 169
| 72
| 32.757396
| 0.782703
| 0.012103
| 0
| 0.753086
| 1
| 0
| 0.074643
| 0.004208
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018519
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c394b2322dcaeeaaecdb5e8389cc5d57f6ab95fd
| 153
|
py
|
Python
|
nidm/experiment/tools/click_main.py
|
adswa/PyNIDM
|
d8e8ce743bf537d98c94ae7f4ba294f97f17e7be
|
[
"Apache-2.0"
] | null | null | null |
nidm/experiment/tools/click_main.py
|
adswa/PyNIDM
|
d8e8ce743bf537d98c94ae7f4ba294f97f17e7be
|
[
"Apache-2.0"
] | null | null | null |
nidm/experiment/tools/click_main.py
|
adswa/PyNIDM
|
d8e8ce743bf537d98c94ae7f4ba294f97f17e7be
|
[
"Apache-2.0"
] | null | null | null |
import click
from nidm.experiment.tools.click_base import cli
from nidm.experiment.tools import nidm_query
#from nidm.experiment.tools import nidm_utils
| 30.6
| 48
| 0.856209
| 24
| 153
| 5.333333
| 0.416667
| 0.1875
| 0.421875
| 0.539063
| 0.515625
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091503
| 153
| 4
| 49
| 38.25
| 0.920863
| 0.287582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c39f19a702ce41487b51a3b01ac224f1ed54a870
| 2,295
|
py
|
Python
|
backend/AXIOME3_app/report/taxonomy/views.py
|
neufeld/AXIOME3-GUI
|
80b87753b47fab116324b4f0e4151c21ab3b1725
|
[
"BSD-3-Clause"
] | 2
|
2021-02-25T16:59:12.000Z
|
2021-02-25T20:06:15.000Z
|
backend/AXIOME3_app/report/taxonomy/views.py
|
neufeld/AXIOME3-GUI
|
80b87753b47fab116324b4f0e4151c21ab3b1725
|
[
"BSD-3-Clause"
] | 7
|
2020-11-18T08:05:52.000Z
|
2022-02-17T20:45:10.000Z
|
backend/AXIOME3_app/report/taxonomy/views.py
|
neufeld/AXIOME3-GUI
|
80b87753b47fab116324b4f0e4151c21ab3b1725
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint, request, send_file
import sys
import os
from AXIOME3_app.utils import get_taxonomic_classification_dir
# Flask blueprint for taxonomy report downloads; all routes below are
# served under the /taxonomy URL prefix.
blueprint = Blueprint("taxonomy", __name__, url_prefix="/taxonomy")
@blueprint.route("/collapse/tsv", methods=['GET', 'POST'])
def taxa_collapse_tsv():
	"""Serve the collapsed taxa table as a TSV file download.

	Form fields: "uid" (session id; an empty string selects the bundled
	sample output directory) and "taxa" (collapse-level prefix of the
	collapsed table file name).
	"""
	uid = request.form["uid"]
	taxa = request.form["taxa"]
	TAXONOMIC_CLASSIFICATION_DIR = get_taxonomic_classification_dir(uid)
	extension = ".tsv"
	# Empty uid means "no user session": fall back to the sample output.
	# (idiomatic `not uid` replaces `uid == ''`; request.form values are str)
	if not uid:
		base_dir = '/data/output/taxa_collapse/'
	else:
		base_dir = TAXONOMIC_CLASSIFICATION_DIR
	# Single join instead of the duplicated per-branch join.
	collapsed_taxa = os.path.join(base_dir, taxa + '_collapsed_table' + extension)
	return send_file(collapsed_taxa, mimetype='text/tab-separated-values', as_attachment=True)
@blueprint.route("/collapse/qza", methods=['GET', 'POST'])
def taxa_collapse_qza():
	"""Serve the collapsed taxa table as a QIIME2 .qza artifact download.

	Form fields: "uid" (session id; an empty string selects the bundled
	sample output directory) and "taxa" (collapse-level prefix of the
	collapsed table file name).
	"""
	uid = request.form["uid"]
	taxa = request.form["taxa"]
	TAXONOMIC_CLASSIFICATION_DIR = get_taxonomic_classification_dir(uid)
	extension = ".qza"
	# Empty uid means "no user session": fall back to the sample output.
	if not uid:
		base_dir = '/data/output/taxa_collapse/'
	else:
		base_dir = TAXONOMIC_CLASSIFICATION_DIR
	# Single join instead of the duplicated per-branch join.
	collapsed_taxa = os.path.join(base_dir, taxa + '_collapsed_table' + extension)
	return send_file(collapsed_taxa, mimetype='application/octet-stream', as_attachment=True)
@blueprint.route("/asv/tsv", methods=['GET', 'POST'])
def taxa_asv_tsv():
	"""Serve the per-ASV taxonomy table as a TSV file download.

	Form field: "uid" (session id; an empty string selects the bundled
	sample output directory).
	"""
	uid = request.form["uid"]
	TAXONOMIC_CLASSIFICATION_DIR = get_taxonomic_classification_dir(uid)
	extension = ".tsv"
	# Empty uid means "no user session": fall back to the sample output.
	if not uid:
		base_dir = '/data/output/exported/'
	else:
		base_dir = TAXONOMIC_CLASSIFICATION_DIR
	# Single join instead of the duplicated per-branch join.
	asv_taxa = os.path.join(base_dir, "taxonomy" + extension)
	return send_file(asv_taxa, mimetype='text/tab-separated-values', as_attachment=True)
@blueprint.route("/asv/qza", methods=['GET', 'POST'])
def taxa_asv_qza():
	"""Serve the per-ASV taxonomy as a QIIME2 .qza artifact download.

	Form field: "uid" (session id; an empty string selects the bundled
	sample output directory).
	"""
	uid = request.form["uid"]
	TAXONOMIC_CLASSIFICATION_DIR = get_taxonomic_classification_dir(uid)
	extension = ".qza"
	# Empty uid means "no user session": fall back to the sample output.
	if not uid:
		base_dir = '/data/output/taxonomy/'
	else:
		base_dir = TAXONOMIC_CLASSIFICATION_DIR
	# Single join instead of the duplicated per-branch join.
	asv_taxa = os.path.join(base_dir, "taxonomy" + extension)
	return send_file(asv_taxa, mimetype='application/octet-stream', as_attachment=True)
| 35.307692
| 101
| 0.745534
| 295
| 2,295
| 5.542373
| 0.183051
| 0.182875
| 0.206728
| 0.068502
| 0.875841
| 0.868502
| 0.784098
| 0.784098
| 0.737615
| 0.737615
| 0
| 0.000489
| 0.108061
| 2,295
| 65
| 102
| 35.307692
| 0.798241
| 0.023965
| 0
| 0.595745
| 0
| 0
| 0.185599
| 0.087657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.085106
| 0
| 0.255319
| 0.12766
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c3a72121f07be5459f6989277fb75ebe2d7514cd
| 9,506
|
py
|
Python
|
pyvaspflow/vasp/schedule.py
|
ChangChunHe/VASP-calculation-process
|
99268479f826f2f31a59d92daff443aeec688fb2
|
[
"MIT"
] | 13
|
2019-06-03T11:41:35.000Z
|
2022-03-04T07:45:42.000Z
|
pyvaspflow/vasp/schedule.py
|
ChangChunHe/VASP-calculation-process
|
99268479f826f2f31a59d92daff443aeec688fb2
|
[
"MIT"
] | 2
|
2019-03-12T10:51:15.000Z
|
2019-03-14T02:18:18.000Z
|
pyvaspflow/vasp/schedule.py
|
ChangChunHe/VASP-calculation-process
|
99268479f826f2f31a59d92daff443aeec688fb2
|
[
"MIT"
] | 8
|
2019-06-03T03:20:20.000Z
|
2021-01-06T11:48:37.000Z
|
import os,subprocess,shutil,logging
from time import sleep
from pyvaspflow.utils import read_config
# Scheduler configuration loaded once at import time and shared by all
# classes in this module (keys used: Task_Schedule, RUN_VASP).
config = read_config()
class Schedule():
    """Select the configured job-scheduler backend.

    Reads ``config["Task_Schedule"]["default_schedule"]`` and exposes the
    matching backend (:class:`Slurm` or :class:`LSF`) as
    ``self.schedule_type``.
    """

    def __init__(self):
        default_schedule = config["Task_Schedule"]["default_schedule"]
        if default_schedule == "SLURM":
            self.schedule_type = Slurm()
        elif default_schedule == "LSF":
            self.schedule_type = LSF()
        else:
            # Previously an unrecognized value silently left schedule_type
            # unset, producing a confusing AttributeError much later;
            # fail fast with a clear message instead.
            raise ValueError(
                "Unsupported Task_Schedule default_schedule: %r" % default_schedule)
class Slurm():
    """SLURM backend: queue inspection, job-file generation and submission
    via the SLURM command line tools (squeue/sinfo/scancel/my_sbatch)."""

    def __init__(self):
        pass

    def _command_output_lines(self, command):
        """Run *command* and return its stdout decoded as a list of lines."""
        p = subprocess.Popen(command, stdout=subprocess.PIPE)
        out = p.stdout.read()
        p.stdout.close()
        return out.decode('utf-8').split('\n')

    def is_inqueue(self, pid):
        """Return True if *pid* occurs anywhere in the squeue output."""
        for line in self._command_output_lines('squeue'):
            if str(pid) in line:
                return True
        return False

    def num_of_job_inqueue(self, pid_list):
        """Count how many ids of *pid_list* appear in the first squeue column."""
        queue_lines = self._command_output_lines('squeue')
        num = 0
        for _pid in pid_list:
            for line in queue_lines:
                if len(line.strip()) == 0:
                    continue
                # The job id is the first whitespace-separated column.
                if _pid in line.split()[0]:
                    num += 1
        return num

    def node_is_idle(self, node_name):
        """Return True if sinfo reports *node_name* on an 'idle' line."""
        for line in self._command_output_lines('sinfo'):
            if 'idle' in line and node_name in line:
                return True
        return False

    def is_job_running(self, pid):
        """Return True if squeue lists *pid* in the running (' R ') state."""
        for line in self._command_output_lines('squeue'):
            if ' R ' in line and pid in line:
                return True
        return False

    def is_job_pd(self, pid):
        """Return True if squeue lists *pid* in the pending (' PD ') state."""
        for line in self._command_output_lines('squeue'):
            if ' PD ' in line and pid in line:
                return True
        return False

    def cancel_job(self, pid):
        """Cancel *pid* and block until it has left the queue."""
        while True:
            subprocess.Popen(['scancel', pid], stdout=subprocess.PIPE)
            if not self.is_inqueue(pid):
                break
            # Was a tight busy loop that re-ran scancel as fast as possible;
            # give the scheduler a moment before retrying.
            sleep(1)

    def write_job_file(self, node_name, cpu_num, node_num, job_name):
        """Write ``<cwd>/<job_name>/job.sh`` with SBATCH headers and the
        configured RUN_VASP prepend/exec/append commands."""
        with open(os.path.join(os.getcwd(), job_name, 'job.sh'), 'w') as f:
            f.writelines('#!/bin/bash -l\n')
            f.writelines('#SBATCH -J '+job_name+'\n')
            f.writelines('#SBATCH -p '+node_name+' -N '+ str(int(node_num)) +' -n '+str(int(cpu_num))+'\n\n')
            f.writelines(config['RUN_VASP']['prepend']+'\n')
            f.writelines(config['RUN_VASP']['exec']+'\n')
            if "append" in config["RUN_VASP"]:
                f.writelines(config['RUN_VASP']['append']+'\n')

    def submit_job(self, job_name):
        """Submit ``./job.sh`` in *job_name* via /bin/my_sbatch.

        Returns the queue id parsed from the last token of the first
        output line; raises ValueError when it is not numeric.
        """
        res = subprocess.Popen(['/bin/my_sbatch', './job.sh'], stdout=subprocess.PIPE, cwd=job_name)
        std = res.stdout.readlines()
        res.stdout.close()
        pid = std[0].decode("utf-8").split()[-1]
        try:
            int(pid)
        except ValueError:  # was a bare except:; only int() failure is expected here
            raise ValueError("Too many jobs you have submitted")
        logging.info(job_name+" calculation has been submitted, the queue id is "+pid)
        logging.info("The work dir is "+os.path.join(os.getcwd(), job_name))
        return pid

    def submit_job_without_job(self, job_name, node_name, cpu_num, node_num=1, submit_job_idx=0):
        """Write a job file targeting the first idle node of *node_name*
        (or the round-robin fallback at *submit_job_idx* when none is idle),
        submit it, and return ``(pid, next_submit_job_idx)``."""
        has_write_job = False
        for idx in range(len(node_name)):
            if self.node_is_idle(node_name[idx]):
                self.write_job_file(job_name=job_name, node_name=node_name[idx], cpu_num=cpu_num[idx], node_num=node_num)
                has_write_job = True
                break
        if not has_write_job:
            # No idle node: use the round-robin index and advance it.
            self.write_job_file(job_name=job_name, node_name=node_name[submit_job_idx], cpu_num=cpu_num[submit_job_idx], node_num=node_num)
            submit_job_idx += 1
            if submit_job_idx == len(node_name):
                submit_job_idx = 0
        res = subprocess.Popen(['/bin/my_sbatch', './job.sh'], stdout=subprocess.PIPE, cwd=job_name)
        std = res.stdout.readlines()
        res.stdout.close()
        pid = std[0].decode('utf-8').split()[-1]
        try:
            int(pid)
        except ValueError:  # was a bare except:
            raise ValueError("Too many jobs you have submitted")
        logging.info(job_name+" calculation has been submitted, the queue id is "+pid)
        logging.info("The work dir is "+os.path.join(os.getcwd(), job_name))
        sleep(5)
        return pid, submit_job_idx
class LSF():
    """LSF backend.

    NOTE(review): apart from :meth:`write_job_file` (which emits #BSUB
    headers), every method shells out to SLURM tools (squeue, sinfo,
    scancel, /bin/my_sbatch) — this looks copy-pasted from the Slurm
    backend; confirm whether bjobs/bhosts/bkill/bsub were intended.
    Behavior is preserved here as-is.
    """

    def __init__(self):
        pass

    def _command_output_lines(self, command):
        """Run *command* and return its stdout decoded as a list of lines."""
        p = subprocess.Popen(command, stdout=subprocess.PIPE)
        out = p.stdout.read()
        p.stdout.close()
        return out.decode('utf-8').split('\n')

    def is_inqueue(self, pid):
        """Return True if *pid* occurs anywhere in the squeue output."""
        for line in self._command_output_lines('squeue'):
            if str(pid) in line:
                return True
        return False

    def num_of_job_inqueue(self, pid_list):
        """Count how many ids of *pid_list* appear in the first squeue column."""
        queue_lines = self._command_output_lines('squeue')
        num = 0
        for _pid in pid_list:
            for line in queue_lines:
                if len(line.strip()) == 0:
                    continue
                # The job id is the first whitespace-separated column.
                if _pid in line.split()[0]:
                    num += 1
        return num

    def node_is_idle(self, node_name):
        """Return True if sinfo reports *node_name* on an 'idle' line."""
        for line in self._command_output_lines('sinfo'):
            if 'idle' in line and node_name in line:
                return True
        return False

    def is_job_running(self, pid):
        """Return True if squeue lists *pid* in the running (' R ') state."""
        for line in self._command_output_lines('squeue'):
            if ' R ' in line and pid in line:
                return True
        return False

    def is_job_pd(self, pid):
        """Return True if squeue lists *pid* in the pending (' PD ') state."""
        for line in self._command_output_lines('squeue'):
            if ' PD ' in line and pid in line:
                return True
        return False

    def cancel_job(self, pid):
        """Cancel *pid* and block until it has left the queue."""
        while True:
            subprocess.Popen(['scancel', pid], stdout=subprocess.PIPE)
            if not self.is_inqueue(pid):
                break
            # Was a tight busy loop that re-ran scancel as fast as possible;
            # give the scheduler a moment before retrying.
            sleep(1)

    def write_job_file(self, node_name, cpu_num, node_num, job_name):
        """Write ``<cwd>/<job_name>/job.sh`` with #BSUB headers and the
        configured RUN_VASP prepend/exec/append commands."""
        with open(os.path.join(os.getcwd(), job_name, 'job.sh'), 'w') as f:
            f.writelines('#!/bin/sh -l\n')
            f.writelines('#BSUB -q '+node_name +'\n')
            f.writelines('#BSUB -n '+cpu_num +'\n')
            f.writelines('#BSUB -e %J.err\n')
            f.writelines('#BSUB -o %J.out\n')
            f.writelines('#BSUB -R "span[ptile=24]"\n')
            f.writelines('hostfile=`echo $LSB_DJOB_HOSTFILE`\n')
            f.writelines('NP=`cat $hostfile | wc -l`\n\n')
            f.writelines(config['RUN_VASP']['prepend']+'\n')
            f.writelines(config['RUN_VASP']['exec']+'\n')
            if "append" in config["RUN_VASP"]:
                f.writelines(config['RUN_VASP']['append']+'\n')

    def submit_job(self, job_name):
        """Submit ``./job.sh`` in *job_name* via /bin/my_sbatch.

        Returns the queue id parsed from the last token of the first
        output line; raises ValueError when it is not numeric.
        """
        res = subprocess.Popen(['/bin/my_sbatch', './job.sh'], stdout=subprocess.PIPE, cwd=job_name)
        std = res.stdout.readlines()
        res.stdout.close()
        pid = std[0].decode("utf-8").split()[-1]
        try:
            int(pid)
        except ValueError:  # was a bare except:; only int() failure is expected here
            raise ValueError("Too many jobs you have submitted")
        logging.info(job_name+" calculation has been submitted, the queue id is "+pid)
        logging.info("The work dir is "+os.path.join(os.getcwd(), job_name))
        return pid

    def submit_job_without_job(self, job_name, node_name, cpu_num, node_num=1, submit_job_idx=0):
        """Write a job file targeting the first idle node of *node_name*
        (or the round-robin fallback at *submit_job_idx* when none is idle),
        submit it, and return ``(pid, next_submit_job_idx)``."""
        has_write_job = False
        for idx in range(len(node_name)):
            if self.node_is_idle(node_name[idx]):
                self.write_job_file(job_name=job_name, node_name=node_name[idx], cpu_num=cpu_num[idx], node_num=node_num)
                has_write_job = True
                break
        if not has_write_job:
            # No idle node: use the round-robin index and advance it.
            self.write_job_file(job_name=job_name, node_name=node_name[submit_job_idx], cpu_num=cpu_num[submit_job_idx], node_num=node_num)
            submit_job_idx += 1
            if submit_job_idx == len(node_name):
                submit_job_idx = 0
        res = subprocess.Popen(['/bin/my_sbatch', './job.sh'], stdout=subprocess.PIPE, cwd=job_name)
        std = res.stdout.readlines()
        res.stdout.close()
        pid = std[0].decode('utf-8').split()[-1]
        try:
            int(pid)
        except ValueError:  # was a bare except:
            raise ValueError("Too many jobs you have submitted")
        logging.info(job_name+" calculation has been submitted, the queue id is "+pid)
        logging.info("The work dir is "+os.path.join(os.getcwd(), job_name))
        sleep(5)
        return pid, submit_job_idx
| 37.87251
| 136
| 0.576478
| 1,323
| 9,506
| 3.94709
| 0.10582
| 0.042895
| 0.061279
| 0.03447
| 0.909996
| 0.897357
| 0.897357
| 0.897357
| 0.897357
| 0.897357
| 0
| 0.006239
| 0.291816
| 9,506
| 250
| 137
| 38.024
| 0.769459
| 0
| 0
| 0.900452
| 0
| 0
| 0.112876
| 0.002209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095023
| false
| 0.00905
| 0.013575
| 0
| 0.221719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.