hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
481a515908803b595951c00aad4eb32e49b0fbfe
| 57
|
py
|
Python
|
agendabuilder/__main__.py
|
llimeht/agendabuilder
|
a2b8a3c6096e58c360b28b89c592824c272f04ed
|
[
"BSD-3-Clause"
] | null | null | null |
agendabuilder/__main__.py
|
llimeht/agendabuilder
|
a2b8a3c6096e58c360b28b89c592824c272f04ed
|
[
"BSD-3-Clause"
] | null | null | null |
agendabuilder/__main__.py
|
llimeht/agendabuilder
|
a2b8a3c6096e58c360b28b89c592824c272f04ed
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from .commands import main
sys.exit(main())
| 9.5
| 26
| 0.736842
| 9
| 57
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 57
| 5
| 27
| 11.4
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4827ce7bac559cdc42b00f9cf60aff092a8be195
| 97
|
py
|
Python
|
djsingleton/apps.py
|
sainipray/djsingleton
|
c2600f821edcfe31f9cb3352446257482fb256a9
|
[
"MIT"
] | 2
|
2017-07-26T20:37:25.000Z
|
2018-08-18T10:53:34.000Z
|
djsingleton/apps.py
|
sainipray/djsingleton
|
c2600f821edcfe31f9cb3352446257482fb256a9
|
[
"MIT"
] | null | null | null |
djsingleton/apps.py
|
sainipray/djsingleton
|
c2600f821edcfe31f9cb3352446257482fb256a9
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class DjsingletonConfig(AppConfig):
name = 'djsingleton'
| 16.166667
| 35
| 0.773196
| 10
| 97
| 7.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154639
| 97
| 5
| 36
| 19.4
| 0.914634
| 0
| 0
| 0
| 0
| 0
| 0.113402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
484e1722be07c9dd927bbaf97fec9928f564c410
| 345
|
py
|
Python
|
ieugwaspy/__init__.py
|
radcheb/ieugwaspy
|
b15588e58cd2c32f5c24f5d497cfd82695c4ce3a
|
[
"MIT"
] | 7
|
2020-04-18T18:09:58.000Z
|
2021-08-16T15:14:48.000Z
|
ieugwaspy/__init__.py
|
radcheb/ieugwaspy
|
b15588e58cd2c32f5c24f5d497cfd82695c4ce3a
|
[
"MIT"
] | 3
|
2020-04-18T21:23:58.000Z
|
2020-05-14T09:14:07.000Z
|
ieugwaspy/__init__.py
|
radcheb/ieugwaspy
|
b15588e58cd2c32f5c24f5d497cfd82695c4ce3a
|
[
"MIT"
] | 1
|
2020-05-13T07:25:37.000Z
|
2020-05-13T07:25:37.000Z
|
"""The ieugwaspy module provides a convenient Python wrapper for the IEU GWAS database API. As far as possible the functionality in this module replicates functionality in the ieugwasr R package
"""
import os
from ieugwaspy.constants import option, urls
from ieugwaspy.api import *
from ieugwaspy.query import *
from ieugwaspy.variants import *
| 43.125
| 194
| 0.811594
| 50
| 345
| 5.6
| 0.62
| 0.185714
| 0.135714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144928
| 345
| 7
| 195
| 49.285714
| 0.949153
| 0.553623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4872a4affc3660080fc85c845d1a3697cf1854d7
| 230
|
py
|
Python
|
fabfile_local.py
|
ideallical/ii-deploytool
|
87c51792fa60aa506a254aba5bda31f10224f674
|
[
"Apache-2.0"
] | null | null | null |
fabfile_local.py
|
ideallical/ii-deploytool
|
87c51792fa60aa506a254aba5bda31f10224f674
|
[
"Apache-2.0"
] | null | null | null |
fabfile_local.py
|
ideallical/ii-deploytool
|
87c51792fa60aa506a254aba5bda31f10224f674
|
[
"Apache-2.0"
] | null | null | null |
# add this file to your .gitignore file of the project
LOCAL_SETTINGS = dict(
media_root='/Users/user/projects/project/media',
path='/Users/user/projects/project',
virtualenv_path='/Users/user/virtualenvs/project',
)
| 28.75
| 54
| 0.734783
| 31
| 230
| 5.354839
| 0.645161
| 0.162651
| 0.204819
| 0.289157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134783
| 230
| 7
| 55
| 32.857143
| 0.834171
| 0.226087
| 0
| 0
| 0
| 0
| 0.528409
| 0.528409
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
487dc08083662feed241652a89edf45a6a948472
| 342
|
py
|
Python
|
stubs/integration_test/request_sources.py
|
ishaanthakur/pyre-check
|
b0f12f8a3e6a817a81d87feae301c96d57d167b9
|
[
"MIT"
] | null | null | null |
stubs/integration_test/request_sources.py
|
ishaanthakur/pyre-check
|
b0f12f8a3e6a817a81d87feae301c96d57d167b9
|
[
"MIT"
] | null | null | null |
stubs/integration_test/request_sources.py
|
ishaanthakur/pyre-check
|
b0f12f8a3e6a817a81d87feae301c96d57d167b9
|
[
"MIT"
] | null | null | null |
# @nolint
from django.http import HttpRequest, HttpResponse
# Integration test illustrating flows from request sources.
def test_index(request: HttpRequest):
eval(request.GET["bad"])
def test_get(request: HttpRequest):
eval(request.GET.get("bad"))
def test_getlist(request: HttpRequest):
eval(request.GET.getlist("bad"))
| 18
| 59
| 0.739766
| 43
| 342
| 5.813953
| 0.44186
| 0.084
| 0.264
| 0.348
| 0.384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 342
| 18
| 60
| 19
| 0.85034
| 0.190058
| 0
| 0
| 0
| 0
| 0.032847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
6f9506757a180ce9be6bc013d1d166900d4b145c
| 51
|
py
|
Python
|
enthought/traits/ui/table_filter.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/traits/ui/table_filter.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/traits/ui/table_filter.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from traitsui.table_filter import *
| 17
| 35
| 0.803922
| 7
| 51
| 5.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 2
| 36
| 25.5
| 0.909091
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
82fc70534b8d672c104e4e974680ae72482aee7d
| 5,580
|
py
|
Python
|
python_modules/dagster/dagster_tests/api_tests/test_api_execute_run.py
|
mpkocher/dagster
|
c25c07de0e9259b08d6227f82d7aaa24f23bee85
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/api_tests/test_api_execute_run.py
|
mpkocher/dagster
|
c25c07de0e9259b08d6227f82d7aaa24f23bee85
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/api_tests/test_api_execute_run.py
|
mpkocher/dagster
|
c25c07de0e9259b08d6227f82d7aaa24f23bee85
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from dagster import seven
from dagster.api.execute_run import cli_api_execute_run, sync_execute_run_grpc
from dagster.core.instance import DagsterInstance
from dagster.grpc.server import GrpcServerProcess
from dagster.grpc.types import LoadableTargetOrigin
from dagster.serdes.ipc import ipc_read_event_stream
from dagster.utils import safe_tempfile_path
from .utils import (
get_foo_grpc_pipeline_handle,
get_foo_pipeline_handle,
legacy_get_foo_pipeline_handle,
)
@pytest.mark.parametrize(
"pipeline_handle", [get_foo_pipeline_handle(), legacy_get_foo_pipeline_handle()],
)
def test_execute_run_api(pipeline_handle):
with seven.TemporaryDirectory() as temp_dir:
instance = DagsterInstance.local_temp(temp_dir)
pipeline_run = instance.create_run(
pipeline_name='foo',
run_id=None,
run_config={},
mode='default',
solids_to_execute=None,
step_keys_to_execute=None,
status=None,
tags=None,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=None,
execution_plan_snapshot=None,
parent_pipeline_snapshot=None,
)
with safe_tempfile_path() as output_file_path:
process = cli_api_execute_run(
output_file=output_file_path,
instance=instance,
pipeline_origin=pipeline_handle.get_origin(),
pipeline_run=pipeline_run,
)
_stdout, _stderr = process.communicate()
events = [event for event in ipc_read_event_stream(output_file_path)]
assert len(events) == 12
assert [
event.event_type_value
for event in events
if hasattr(event, 'event_type_value') # ExecuteRunArgsLoadComplete is synthetic
] == [
'PIPELINE_START',
'ENGINE_EVENT',
'STEP_START',
'STEP_OUTPUT',
'STEP_SUCCESS',
'STEP_START',
'STEP_INPUT',
'STEP_OUTPUT',
'STEP_SUCCESS',
'ENGINE_EVENT',
'PIPELINE_SUCCESS',
]
@pytest.mark.parametrize(
"pipeline_handle", [get_foo_grpc_pipeline_handle()],
)
def test_execute_run_api_grpc_server_handle(pipeline_handle):
with seven.TemporaryDirectory() as temp_dir:
instance = DagsterInstance.local_temp(temp_dir)
pipeline_run = instance.create_run(
pipeline_name='foo',
run_id=None,
run_config={},
mode='default',
solids_to_execute=None,
step_keys_to_execute=None,
status=None,
tags=None,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=None,
execution_plan_snapshot=None,
parent_pipeline_snapshot=None,
)
events = [
event
for event in sync_execute_run_grpc(
api_client=pipeline_handle.repository_handle.repository_location_handle.client,
instance_ref=instance.get_ref(),
pipeline_origin=pipeline_handle.get_origin(),
pipeline_run=pipeline_run,
)
]
assert len(events) == 14
assert [event.event_type_value for event in events] == [
'ENGINE_EVENT',
'ENGINE_EVENT',
'PIPELINE_START',
'ENGINE_EVENT',
'STEP_START',
'STEP_OUTPUT',
'STEP_SUCCESS',
'STEP_START',
'STEP_INPUT',
'STEP_OUTPUT',
'STEP_SUCCESS',
'ENGINE_EVENT',
'PIPELINE_SUCCESS',
'ENGINE_EVENT',
]
@pytest.mark.parametrize(
"pipeline_handle", [get_foo_pipeline_handle()],
)
def test_execute_run_api_grpc_python_handle(pipeline_handle):
with seven.TemporaryDirectory() as temp_dir:
instance = DagsterInstance.local_temp(temp_dir)
pipeline_run = instance.create_run(
pipeline_name='foo',
run_id=None,
run_config={},
mode='default',
solids_to_execute=None,
step_keys_to_execute=None,
status=None,
tags=None,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=None,
execution_plan_snapshot=None,
parent_pipeline_snapshot=None,
)
loadable_target_origin = LoadableTargetOrigin.from_python_origin(
pipeline_handle.get_origin().repository_origin
)
with GrpcServerProcess(loadable_target_origin, max_workers=2) as server_process:
api_client = server_process.create_ephemeral_client()
events = [
event
for event in sync_execute_run_grpc(
api_client=api_client,
instance_ref=instance.get_ref(),
pipeline_origin=pipeline_handle.get_origin(),
pipeline_run=pipeline_run,
)
]
assert len(events) == 14
assert [event.event_type_value for event in events] == [
'ENGINE_EVENT',
'ENGINE_EVENT',
'PIPELINE_START',
'ENGINE_EVENT',
'STEP_START',
'STEP_OUTPUT',
'STEP_SUCCESS',
'STEP_START',
'STEP_INPUT',
'STEP_OUTPUT',
'STEP_SUCCESS',
'ENGINE_EVENT',
'PIPELINE_SUCCESS',
'ENGINE_EVENT',
]
| 31.525424
| 95
| 0.594265
| 573
| 5,580
| 5.376963
| 0.167539
| 0.081792
| 0.02629
| 0.040896
| 0.734826
| 0.714054
| 0.714054
| 0.700747
| 0.687115
| 0.636157
| 0
| 0.001863
| 0.326523
| 5,580
| 176
| 96
| 31.704545
| 0.817988
| 0.006989
| 0
| 0.675
| 0
| 0
| 0.099838
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 1
| 0.01875
| false
| 0
| 0.05625
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
82fcc19d0d503b70686efd74b56e941367a19bff
| 590
|
py
|
Python
|
base_development_files/vglConst.py
|
arturxz/TCC
|
441f5e1f842abb67743bf57bd7346b6cd3353091
|
[
"MIT"
] | 2
|
2019-06-02T17:09:17.000Z
|
2021-02-17T19:57:37.000Z
|
base_development_files/vglConst.py
|
arturxz/TCC
|
441f5e1f842abb67743bf57bd7346b6cd3353091
|
[
"MIT"
] | null | null | null |
base_development_files/vglConst.py
|
arturxz/TCC
|
441f5e1f842abb67743bf57bd7346b6cd3353091
|
[
"MIT"
] | null | null | null |
"""
AS PYTHON DOESN'T HAVE CONSTANT DECLARATION, THE NEXT METHODS
RETURN THE VALUES WHO NEED CONSTANT BEHAVIOR.
"""
def VGL_SHAPE_NCHANNELS():
return 0
def VGL_SHAPE_WIDTH():
return 1
def VGL_SHAPE_HEIGHT():
return 2
def VGL_SHAPE_LENGTH():
return 3
def VGL_MAX_DIM():
return 10
def VGL_ARR_SHAPE_SIZE():
return VGL_MAX_DIM()+1
def VGL_ARR_CLSTREL_SIZE():
return 256
def VGL_STREL_CUBE():
return 1
def VGL_STREL_CROSS():
return 2
def VGL_STREL_GAUSS():
return 3
def VGL_STREL_MEAN():
return 4
def VGL_IMAGE_3D_IMAGE():
return 0
def VGL_IMAGE_2D_IMAGE():
return 1
| 13.72093
| 62
| 0.749153
| 102
| 590
| 4.019608
| 0.411765
| 0.190244
| 0.107317
| 0.063415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036511
| 0.164407
| 590
| 43
| 63
| 13.72093
| 0.795132
| 0.181356
| 0
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d20cee2dce4ae1288ff2eba8b57ebc8b16a3aad7
| 238
|
py
|
Python
|
tests/gamestonk_terminal/stocks/behavioural_analysis/test_ba_api.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | 1
|
2022-03-19T23:53:38.000Z
|
2022-03-19T23:53:38.000Z
|
tests/gamestonk_terminal/stocks/behavioural_analysis/test_ba_api.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
tests/gamestonk_terminal/stocks/behavioural_analysis/test_ba_api.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
# IMPORTATION STANDARD
from types import ModuleType
# IMPORTATION THIRDPARTY
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.behavioural_analysis import ba_api
def test_module_loaded():
assert isinstance(ba_api, ModuleType)
| 19.833333
| 65
| 0.827731
| 28
| 238
| 6.821429
| 0.75
| 0.052356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12605
| 238
| 11
| 66
| 21.636364
| 0.918269
| 0.268908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d263ad9abbc281409a8449d220b21eea24f1b83f
| 204
|
py
|
Python
|
secret_keys.py
|
Three-Dev-Musketeers/Mumbai_Police
|
293fbc1d81db459c23649c9c6b1eef8c38da1939
|
[
"MIT"
] | 1
|
2021-03-31T02:02:35.000Z
|
2021-03-31T02:02:35.000Z
|
secret_keys.py
|
Divesh2201/Mumbai_Police
|
dfabf8494de2c790178541ee20d37d3002ca12af
|
[
"MIT"
] | null | null | null |
secret_keys.py
|
Divesh2201/Mumbai_Police
|
dfabf8494de2c790178541ee20d37d3002ca12af
|
[
"MIT"
] | null | null | null |
EMAIL_HOST_USER = 'mumbaipolice366@gmail.com'
EMAIL_HOST_PASSWORD = 'diveshkushmarmik'
reCAPTCHA_SITE_KEY = '6LePHmQaAAAAAJMMPEWnSEDakG2lep5xJHLoroZk'
CLOUDINARY_API_SECRET = '0ts3tZxve9J573VJoJV2gQ7wqtA'
| 51
| 63
| 0.877451
| 18
| 204
| 9.5
| 0.888889
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072539
| 0.053922
| 204
| 4
| 64
| 51
| 0.813472
| 0
| 0
| 0
| 0
| 0
| 0.526829
| 0.44878
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
d2773a09a4e8b40ecb8745f7807535bb7a65a92e
| 91,130
|
py
|
Python
|
neural_analysis.py
|
jagrayson/Neuro
|
7e14ffbbc347f071fce1ff6865cf3fb336f944bd
|
[
"Apache-2.0"
] | 1
|
2021-11-14T15:47:05.000Z
|
2021-11-14T15:47:05.000Z
|
neural_analysis.py
|
jagrayson/Neuro
|
7e14ffbbc347f071fce1ff6865cf3fb336f944bd
|
[
"Apache-2.0"
] | null | null | null |
neural_analysis.py
|
jagrayson/Neuro
|
7e14ffbbc347f071fce1ff6865cf3fb336f944bd
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pickle
import contrib_to_behavior
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn import svm
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rcParams["font.family"] = "arial"
class neural_analysis:
def __init__(self, model_filename, ABBA=False, old_format = False):
x = pickle.load(open(model_filename, 'rb'))
self.ABBA = ABBA
# reshape STP depression
self.syn_x = np.stack(x['syn_x'],axis=2)
self.syn_x = np.stack(self.syn_x,axis=1)
if self.syn_x.shape[0] == 0:
self.syn_x = None
else:
num_neurons, trial_length, num_blocks, trials_per_block = self.syn_x.shape
self.syn_x = np.reshape(self.syn_x,(num_neurons,trial_length,num_blocks*trials_per_block))
# reshape STP facilitation
self.syn_u = np.stack(x['syn_u'],axis=2)
self.syn_u = np.stack(self.syn_u,axis=1)
if self.syn_u.shape[0] == 0:
self.syn_u = None
else:
num_neurons, trial_length, num_blocks, trials_per_block = self.syn_u.shape
self.syn_u = np.reshape(self.syn_u,(num_neurons,trial_length,num_blocks*trials_per_block))
# reshape RNN outputs
self.rnn_outputs = np.stack(x['hidden_state'],axis=2)
self.rnn_outputs = np.stack(self.rnn_outputs,axis=1)
num_neurons, trial_length, num_blocks, trials_per_block = self.rnn_outputs.shape
self.rnn_outputs = np.reshape(self.rnn_outputs,(num_neurons,trial_length,num_blocks*trials_per_block))
# reshape desired outputs
self.desired_outputs = x['desired_output']
if old_format:
self.desired_outputs = np.transpose(self.desired_outputs,(2,0,1))
# reshape train_mask
self.train_mask = x['train_mask']
self.train_mask = np.transpose(self.train_mask,(0,1))
# reshape RNN inputs
self.rnn_inputs = x['rnn_input']
self.rnn_inputs = np.transpose(self.rnn_inputs,(2,0,1))
# reshape model outputs
self.model_outputs = np.stack(x['model_outputs'],axis=2)
self.model_outputs = np.stack(self.model_outputs,axis=1)
num_classes = self.model_outputs.shape[0]
self.model_outputs = np.reshape(self.model_outputs,(num_classes,trial_length,num_blocks*trials_per_block))
"""
rnn_inputs, desired_outputs, rnn_outputs, model_outputs
should be of shape neurons X time X trials
print(self.rnn_inputs.shape, self.desired_outputs.shape,self.rnn_outputs.shape,self.model_outputs.shape, self.train_mask.shape)
"""
# reshape trial_conds
self.sample_dir = x['sample_dir']
self.test_dir = x['test_dir']
self.match = x['match']
self.rule = x['rule']
self.catch = x['catch']
self.probe = x['probe']
# for the ABBA trials
if self.ABBA:
self.num_test_stim = x['num_test_stim']
self.repeat_test_stim = x['repeat_test_stim']
self.ABBA_delay = x['params']['ABBA_delay']
# other info
#self.EI_list = x['params']['EI_list']
self.num_rules = len(x['params']['possible_rules'])
self.possible_rules = x['params']['possible_rules']
self.num_motion_dirs = x['params']['num_motion_dirs']
self.U = x['U']
self.W_rnn = x['w_rnn']
self.b_rnn = x['b_rnn']
self.W_in = x['w_in']
self.EI_list = x['params']['EI_list']
self.dead_time = x['params']['dead_time']
self.fix_time = x['params']['fix_time']
self.delta_t = x['params']['dt']
if self.ABBA:
self.max_num_tests = x['params']['max_num_tests']
self.ABBA_accuracy_match, self.ABBA_accuracy_non_match = self.performance_ABBA()
else:
pass
#accuracy = self.performance()
#print(accuracy)
def calc_native_tuning(self):
rule = 0
sample_rng = range(8+20,8+20+20)
#sample_rng = range(88,108)
num_dirs = self.num_motion_dirs
num_input_neurons, trial_length, num_trials = self.rnn_inputs.shape
mean_input_resp = np.zeros((num_input_neurons, num_dirs))
num_rnn_neurons = self.rnn_outputs.shape[0]
native_tuning = np.zeros((num_rnn_neurons, num_dirs))
for d in range(num_dirs):
ind = np.where((self.rule == self.possible_rules[rule])*(self.sample_dir==d))
#ind = np.where((self.rule == self.possible_rules[rule])*(self.test_dir==d))
s = np.mean(self.rnn_inputs[:,:,ind[0]],axis=2)
mean_input_resp[:,d] = np.mean(s[:,sample_rng],axis=1)
native_tuning = np.dot(self.W_in, mean_input_resp)
return native_tuning
def motion_tuning(self):
num_neurons, trial_length, num_trials = self.rnn_outputs.shape
sample_pd = np.zeros((num_neurons, trial_length))
sample_pev = np.zeros((num_neurons, trial_length))
sample_amp = np.zeros((num_neurons, trial_length))
test_pd = np.zeros((num_neurons, 2, trial_length))
test_pev = np.zeros((num_neurons, 2, trial_length))
test_amp = np.zeros((num_neurons, 2, trial_length))
sample_dir = np.ones((num_trials, 3))
sample_dir[:,1] = np.cos(2*np.pi*self.sample_dir/self.num_motion_dirs)
sample_dir[:,2] = np.sin(2*np.pi*self.sample_dir/self.num_motion_dirs)
test_dir = np.ones((num_trials, 3))
test_dir[:,1] = np.cos(2*np.pi*self.test_dir/self.num_motion_dirs)
test_dir[:,2] = np.sin(2*np.pi*self.test_dir/self.num_motion_dirs)
for n in range(num_neurons):
for t in range(trial_length):
h = np.linalg.lstsq(sample_dir, self.rnn_outputs[n,t,:])
pred_err = self.rnn_outputs[n,t,:] - np.dot(h[0], sample_dir.T)
mse = np.mean(pred_err**2)
response_var = np.var(self.rnn_outputs[n,t,:])
sample_pev[n,t] = 1 - (mse)/(response_var+1e-9)
sample_pd[n,t] = np.arctan2(h[0][2],h[0][1])
sample_amp[n,t] = np.sqrt(h[0][0]**2+h[0][1]**2)
for m in range(2):
ind = np.where(self.match==m)[0]
h = np.linalg.lstsq(test_dir[ind], self.rnn_outputs[n,t,ind])
pred_err = self.rnn_outputs[n,t,ind] - np.dot(h[0], test_dir[ind].T)
mse = np.mean(pred_err**2)
response_var = np.var(self.rnn_outputs[n,t,ind])
test_pev[n,m,t] = 1 - (mse)/(response_var+1e-9)
test_pd[n,m,t] = np.arctan2(h[0][2],h[0][1])
test_amp[n,m,t] = np.sqrt(h[0][0]**2+h[0][1]**2)
return sample_pd, sample_pev, sample_amp, test_pd, test_pev, test_amp
def recreate_effective_weight_matrix(self, EI = False):
rule = 0
num_neurons, trial_length, num_trials = self.syn_u.shape
W = np.zeros((num_neurons,num_neurons,self.num_motion_dirs, trial_length))
mean_efficacy = np.zeros((num_neurons,self.num_motion_dirs, trial_length))
for d in range(self.num_motion_dirs):
ind = np.where((self.rule == self.possible_rules[rule])*(self.sample_dir==d)*(self.match==1))[0]
mean_efficacy[:,d,:] = np.mean(self.syn_u[:,:,ind]*self.syn_x[:,:,ind],axis=2)
if EI:
ei_diag = np.diag(self.EI_list)
W_rnn = np.dot(np.maximum(0,self.W_rnn), ei_diag)
else:
W_rnn = self.W_rnn
for n1 in range(num_neurons):
for n2 in range(num_neurons):
for d in range(self.num_motion_dirs):
W[n1,n2,d,:] = mean_efficacy[n2,d,:]*W_rnn[n1,n2]
return W
def recreate_output_current(self, EI = False):
rule = 0
num_neurons, trial_length, num_trials = self.syn_u.shape
out_current = np.zeros((num_neurons,self.num_motion_dirs, self.num_motion_dirs, trial_length))
out_current = np.zeros((num_neurons,self.num_motion_dirs, self.num_motion_dirs, trial_length))
for s in range(self.num_motion_dirs):
for t in range(self.num_motion_dirs):
ind = np.where((self.rule == self.possible_rules[rule])*(self.sample_dir==s)*(self.test_dir==t))[0]
out_current[:,s,t,:] = np.mean(self.syn_u[:,:,ind]*self.syn_x[:,:,ind]*self.rnn_outputs[:,:,ind],axis=2)
"""
if EI:
ei_diag = np.diag(self.EI_list)
W_rnn = np.dot(np.maximum(0,self.W_rnn), ei_diag)
else:
W_rnn = self.W_rnn
for n1 in range(num_neurons):
for n2 in range(num_neurons):
for s in range(self.num_motion_dirs):
for t in range(self.num_motion_dirs):
out_current[n1,n2,s,t,:] = post_syn[n2,s,t,:]*W_rnn[n1,n2]
"""
return out_current
def performance(self):
n = 18 # number of time steps to measure during test, this will be the basis of performance
time_correct = np.zeros((self.num_rules, self.num_motion_dirs, 2))
count = np.zeros((self.num_rules, self.num_motion_dirs, 2))
for i in range(len(self.sample_dir)):
if self.catch[i]==0:
s = np.int_(self.sample_dir[i])
m = np.int_(self.match[i])
r = np.int_(np.where(self.rule[i]==self.possible_rules)[0])
count[r,s,m] +=1
if m==1:
score=np.mean((self.model_outputs[2,-n:,i]>self.model_outputs[1,-n:,i])*(self.model_outputs[2,-n:,i]>self.model_outputs[0,-n:,i]))
else:
score=np.mean((self.model_outputs[1,-n:,i]>self.model_outputs[2,-n:,i])*(self.model_outputs[1,-n:,i]>self.model_outputs[0,-n:,i]))
time_correct[r,s,m] += score
return time_correct/count
def performance_ABBA(self):
ABBA_delay = self.ABBA_delay//self.delta_t
eof = (self.dead_time+self.fix_time)//self.delta_t
eos = eof + ABBA_delay
# performance is measured with and without a repeated distractor
time_correct_match = np.zeros((self.max_num_tests))
time_correct_non_match = np.zeros((self.max_num_tests))
time_match = np.zeros((self.max_num_tests))
time_non_match = np.zeros((self.max_num_tests))
for i in range(len(self.sample_dir)):
for j in range(self.num_test_stim[i]):
# will discard the first time point of each test stim
test_rng = range(1+eos+(2*j+1)*ABBA_delay, eos+(2*j+2)*ABBA_delay)
matching_stim = self.match[i]==1 and j==self.num_test_stim[i]-1
if matching_stim:
time_match[j] += ABBA_delay-1 # -1 because we're discarding the first time point of each test stim
time_correct_match[j] += np.sum((self.model_outputs[2,test_rng,i]>self.model_outputs[1,test_rng,i])*(self.model_outputs[2,test_rng,i]>self.model_outputs[0,test_rng,i]))
else:
time_non_match[j] += ABBA_delay-1
time_correct_non_match[j] += np.sum((self.model_outputs[1,test_rng,i]>self.model_outputs[2,test_rng,i])*(self.model_outputs[1,test_rng,i]>self.model_outputs[0,test_rng,i]))
auccracy_match = time_correct_match/time_match
auccracy_non_match = time_correct_non_match/time_non_match
print('Accuracy')
print(time_correct_non_match, time_non_match)
print(time_correct_match, time_match)
return auccracy_match, auccracy_non_match
def show_results(self):
print(self.results)
def plot_example_neurons(self, example_numbers):
mean_resp = calc_mean_responses(self)
1/0
f = plt.figure(figsize=(12,8))
ax = f.add_subplot(1, 3, 1)
ax.imshow(trial_info['sample_direction'],interpolation='none',aspect='auto')
ax = f.add_subplot(1, 3, 2)
ax.imshow(trial_info['test_direction'],interpolation='none',aspect='auto')
ax = f.add_subplot(1, 3, 3)
ax.imshow(trial_info['match'],interpolation='none',aspect='auto')
plt.show()
1/0
def calculate_svms(self, num_reps = 3, DMC = [False], decode_test = False):
    """Linear-SVM decoding of sample (and optionally test) identity over time.

    For each rule and each time point, trains a linear SVM to decode the
    sample direction (or the binary category when DMC[r] is True) from the
    spiking activity, and, when synaptic traces exist, from the product
    syn_x*syn_u.

    Args:
        num_reps: number of train/test splits per time point.
        DMC: per-rule flags; True -> decode category (2 classes) instead of
            direction (num_motion_dirs classes).
        decode_test: additionally decode the test-stimulus identity.
    Returns:
        (spike_decoding, synapse_decoding, spike_decoding_test,
         synapse_decoding_test), each of shape
        (trial_length, num_rules, num_reps).
    """
    lin_clf = svm.SVC(C=1,kernel='linear',decision_function_shape='ovr', shrinking=False, tol=1e-4)
    num_neurons, trial_length, num_trials = self.rnn_outputs.shape
    spike_decoding = np.zeros((trial_length,self.num_rules,num_reps))
    synapse_decoding = np.zeros((trial_length,self.num_rules,num_reps))
    spike_decoding_test = np.zeros((trial_length,self.num_rules,num_reps))
    synapse_decoding_test = np.zeros((trial_length,self.num_rules,num_reps))
    N = self.num_motion_dirs
    # NOTE(review): the `*np.ones_like(...)` factor is a no-op; this is just
    # floor(sample_dir / (num_motion_dirs/2)), i.e. a 2-way category label.
    # Possibly different parenthesization was intended -- confirm.
    sample_cat = np.floor(self.sample_dir/(self.num_motion_dirs/2)*np.ones_like(self.sample_dir))
    if self.ABBA:
        # ABBA task stores one test direction per test stimulus; use the first
        test_dir = self.test_dir[:,0]
    else:
        test_dir = self.test_dir
    test_cat = np.floor(test_dir/(self.num_motion_dirs/2)*np.ones_like(test_dir))
    for r in range(self.num_rules):
        if self.ABBA:
            # ABBA: keep only trials with at least 4 test stimuli
            ind = np.where((self.num_test_stim>=4))[0]
        else:
            ind = np.where((self.rule==self.possible_rules[r]))[0]
        for t in range(trial_length):
            if DMC[r]:
                spike_decoding[t,r,:] = self.calc_svm_equal_trials(lin_clf,self.rnn_outputs[:,t,ind].T, sample_cat[ind],num_reps,2)
                if decode_test:
                    spike_decoding_test[t,r,:] = self.calc_svm_equal_trials(lin_clf,self.rnn_outputs[:,t,ind].T, test_cat[ind],num_reps,2)
            else:
                spike_decoding[t,r,:] = self.calc_svm_equal_trials(lin_clf,self.rnn_outputs[:,t,ind].T, self.sample_dir[ind],num_reps,N)
                if decode_test:
                    spike_decoding_test[t,r,:] = self.calc_svm_equal_trials(lin_clf,self.rnn_outputs[:,t,ind].T, test_dir[ind],num_reps,N)
            if self.syn_x is not None:
                # presumably depression (syn_x) x utilization (syn_u) -- verify
                effective_current = self.syn_x[:,t,ind].T*self.syn_u[:,t,ind].T
                if DMC[r]:
                    synapse_decoding[t,r,:] = self.calc_svm_equal_trials(lin_clf,effective_current, sample_cat[ind],num_reps,2)
                    if decode_test:
                        synapse_decoding_test[t,r,:] = self.calc_svm_equal_trials(lin_clf,effective_current, test_cat[ind],num_reps,2)
                else:
                    synapse_decoding[t,r,:] = self.calc_svm_equal_trials(lin_clf,effective_current, self.sample_dir[ind],num_reps,N)
                    if decode_test:
                        synapse_decoding_test[t,r,:] = self.calc_svm_equal_trials(lin_clf,effective_current, test_dir[ind],num_reps,N)
    return spike_decoding, synapse_decoding, spike_decoding_test, synapse_decoding_test
def calculate_autocorr(self, time_start, time_end):
    """Across-trial correlation of each neuron's activity at time lags.

    For each pair of time points (i, j) in [time_start, time_end), the
    across-trial Pearson correlation is accumulated into lag bin |i-j|,
    then every bin is divided by the number of contributions.

    Returns:
        (spike_autocorr, syn_x_autocorr, syn_adapt_autocorr), each of
        shape (num_neurons, time_end - time_start).
    """
    num_neurons, trial_length, num_trials = self.rnn_outputs.shape
    num_lags = time_end-time_start
    spike_autocorr = np.zeros((num_neurons, num_lags))
    syn_x_autocorr = np.zeros((num_neurons, num_lags))
    syn_adapt_autocorr = np.zeros((num_neurons, num_lags))
    for n in range(num_neurons):
        count = np.zeros((num_lags))
        for i in range(time_start, time_end):
            for j in range(time_start, time_end):
                lag = np.abs(i-j)
                for s in range(4):
                    # NOTE(review): the sample_dir selection is immediately
                    # overwritten by the match selection, so `ind` is identical
                    # on all 4 iterations of the `s` loop; sums and `count` are
                    # uniformly inflated 4x, which cancels in the division
                    # below.  The two conditions were probably meant to be
                    # combined -- confirm intent before changing.
                    ind = np.where(self.sample_dir==s)
                    ind = np.where(self.match==1)
                    ind = ind[0]
                    count[lag] += 1
                    r1 = np.corrcoef(self.rnn_outputs[n,i,ind], self.rnn_outputs[n,j,ind])
                    spike_autocorr[n, lag] += r1[0,1]
                    if self.syn_x is not None:
                        r1 = np.corrcoef(self.syn_x[n,i,ind], self.syn_x[n,j,ind])
                        syn_x_autocorr[n, lag] += r1[0,1]
                    if self.sa is not None:
                        r1 = np.corrcoef(self.sa[n,i,ind], self.sa[n,j,ind])
                        syn_adapt_autocorr[n, lag] += r1[0,1]
        # normalize each lag bin by how many (i, j, s) visits it received
        spike_autocorr[n,:] /= count
        syn_x_autocorr[n,:] /= count
        syn_adapt_autocorr[n,:] /= count
    return spike_autocorr,syn_x_autocorr,syn_adapt_autocorr
def calc_mean_responses(self):
    """Trial-averaged neural responses and network outputs.

    The trial selection for (rule, direction) does not depend on the neuron,
    so it is computed once per (r, d) and the average is taken over all
    neurons at once (the previous revision recomputed np.where inside the
    per-neuron loop).  Results are numerically identical.

    Returns:
        mean_resp: (num_neurons, num_rules, num_motion_dirs, trial_length)
            mean activity over trials with a given rule and sample direction.
        mean_out_match: (num_classes, num_rules, trial_length) mean model
            output over non-catch match trials.
        mean_out_non_match: same, over non-catch non-match trials.
        Selections with no trials produce NaN rows (np.mean over an empty
        axis), matching the previous behavior.
    """
    num_rules = self.num_rules
    num_dirs = self.num_motion_dirs
    num_neurons, trial_length, num_trials = self.rnn_outputs.shape
    num_classes = self.model_outputs.shape[0]
    mean_resp = np.zeros((num_neurons, num_rules, num_dirs, trial_length))
    mean_out_match = np.zeros((num_classes, num_rules, trial_length))
    mean_out_non_match = np.zeros((num_classes, num_rules, trial_length))
    for r in range(num_rules):
        for d in range(num_dirs):
            # trial selection depends only on (r, d): hoisted out of any
            # per-neuron loop
            if self.ABBA:
                ind = np.where((self.num_test_stim>=4)*(self.sample_dir==d))[0]
            else:
                ind = np.where((self.rule == self.possible_rules[r])*(self.sample_dir==d))[0]
            # average across selected trials for every neuron at once
            mean_resp[:,r,d,:] = np.mean(self.rnn_outputs[:,:,ind], axis=2)
    for r in range(num_rules):
        ind_match = np.where((self.rule == self.possible_rules[r])*(self.match==1)*(self.catch==0))[0]
        ind_non_match = np.where((self.rule == self.possible_rules[r])*(self.match==0)*(self.catch==0))[0]
        mean_out_match[:,r,:] = np.mean(self.model_outputs[:,:,ind_match], axis=2)
        mean_out_non_match[:,r,:] = np.mean(self.model_outputs[:,:,ind_non_match], axis=2)
    return mean_resp, mean_out_match, mean_out_non_match
def decoding_accuracy_postle(self, num_reps = 10):
    """PEV and SVM decoding for the Postle two-cue, two-modality task.

    The (2,2,2,2,...) axes of the result arrays index: first rule/attention
    cue (r1), second rule/attention cue (r2), probe presence (p), and
    sample modality (m).

    Args:
        num_reps: number of SVM train/test splits per time point.
    Returns:
        (sample_pev, sample_stp_pev, sample_decoding, sample_stp_decoding,
         model_output)
    """
    lin_clf = svm.SVC(C=1,kernel='linear',decision_function_shape='ovr', shrinking=False, tol=1e-5)
    num_neurons, trial_length, num_trials = self.rnn_outputs.shape
    sample_pev = np.zeros((num_neurons, 2,2,2,2,trial_length))
    sample_stp_pev = np.zeros((num_neurons, 2,2,2,2,trial_length))
    sample_decoding = np.zeros((2,2,2,2,trial_length,num_reps))
    sample_stp_decoding = np.zeros((2,2,2,2,trial_length,num_reps))
    model_output = np.zeros((2,2,3,trial_length))
    # r1 and r2 refer to the first and second rule (attention) cue
    # m refers to the modality
    # p refers to the presence or absence of a probe
    for m1 in range(2):
        for m2 in range(2):
            # mean model output by (match1, match2), probe-absent trials only
            ind = np.where((self.match[:,0] == m1)*(self.match[:,1] == m2)*(self.probe[:,1]==0))[0]
            model_output[m1,m2,:,:] = np.mean(self.model_outputs[:,:,ind],axis=2)
    for r1 in range(2):
        for r2 in range(2):
            for p in range(2):
                ind = np.where((self.rule[:,0] == r1)*(self.rule[:,1] == r2)*(self.probe[:,1]==p))[0]
                #ind = np.where((self.rule[:,0] == r1)*(self.rule[:,1] == r2)*(self.probe[:,1]>=0))[0]
                for m in range(2):
                    for t in range(trial_length):
                        for n in range(num_neurons):
                            sample_pev[n,r1,r2,p,m,t] = self.calc_pev(self.rnn_outputs[n,t,ind], self.sample_dir[ind,m])
                        sample_decoding[r1,r2,p,m,t,:] = self.calc_svm_equal_trials(lin_clf,self.rnn_outputs[:,t,ind].T, self.sample_dir[ind,m],num_reps, self.num_motion_dirs)
                        if self.syn_x is not None:
                            for n in range(num_neurons):
                                # presumably depression x utilization -- verify
                                effective_current = self.syn_x[n,t,ind]*self.syn_u[n,t,ind]
                                sample_stp_pev[n,r1,r2,p,m,t] = self.calc_pev(effective_current, self.sample_dir[ind,m])
                            effective_current = self.syn_x[:,t,ind]*self.syn_u[:,t,ind]
                            sample_stp_decoding[r1,r2,p,m,t,:] = self.calc_svm_equal_trials(lin_clf,effective_current.T, self.sample_dir[ind,m],num_reps, self.num_motion_dirs)
    return sample_pev, sample_stp_pev, sample_decoding, sample_stp_decoding, model_output
@staticmethod
def calc_svm_equal_trials(lin_clf, y, conds, num_reps, num_conds):
# normalize values between 0 and 1
for i in range(y.shape[1]):
m1 = y[:,i].min()
m2 = y[:,i].max()
y[:,i] -= m1
if m2>m1:
y[:,i] /=(m2-m1)
"""
Want to ensure that all conditions have the same number of trials
Will find the min number of trials per conditions, and remove trials above the min number
"""
num_trials = np.zeros((num_conds))
for i in range(num_conds):
num_trials[i] = np.sum(conds==i)
min_num_trials = int(np.min(num_trials))
conds_equal = np.zeros((min_num_trials*num_conds))
y_equal = np.zeros((min_num_trials*num_conds, y.shape[1]))
for i in range(num_conds):
ind = np.where(conds==i)[0]
ind = ind[:min_num_trials]
conds_equal[i*min_num_trials:(i+1)*min_num_trials] = i
y_equal[i*min_num_trials:(i+1)*min_num_trials, :] = y[ind,:]
train_pct = 0.75
score = np.zeros((num_reps))
for r in range(num_reps):
q = np.random.permutation(len(conds_equal))
i = np.int_(np.round(len(conds_equal)*train_pct))
train_ind = q[:i]
test_ind = q[i:]
lin_clf.fit(y_equal[train_ind,:], conds_equal[train_ind])
#dec = lin_clf.decision_function(y[test_ind,:])
dec = lin_clf.predict(y_equal[test_ind,:])
for i in range(len(test_ind)):
if conds_equal[test_ind[i]]==dec[i]:
score[r] += 1/len(test_ind)
return score
@staticmethod
def calc_svm(lin_clf, y, conds, num_reps):
num_conds = len(np.unique(conds))
y = np.squeeze(y).T
# normalize values between 0 and 1
for i in range(y.shape[1]):
m1 = y[:,i].min()
m2 = y[:,i].max()
y[:,i] -= m1
if m2>m1:
y[:,i] /=(m2-m1)
train_pct = 0.75
score = np.zeros((num_reps))
for r in range(num_reps):
q = np.random.permutation(len(conds))
i = np.int_(np.round(len(conds)*train_pct))
train_ind = q[:i]
test_ind = q[i:]
lin_clf.fit(y[train_ind,:], conds[train_ind])
dec = lin_clf.decision_function(y[test_ind,:])
if num_conds>2:
dec = np.argmax(dec, 1)
else:
dec = np.int_(np.sign(dec)*0.5+0.5)
for i in range(len(test_ind)):
if conds[test_ind[i]]==dec[i]:
score[r] += 1/len(test_ind)
return score
def calculate_pevs(self):
    """Percent-explained-variance (PEV) of each neuron for several factors.

    Computes, per neuron / rule / time point, the PEV of activity with
    respect to sample direction, sample category, test direction, rule and
    match status, and the same quantities from the synaptic trace
    syn_x*syn_u when available.

    Returns:
        (sample_pev, test_pev, rule_pev, match_pev, sample_stp_pev,
         sample_cat_pev, sample_cat_stp_pev, test_stp_pev)
    """
    num_neurons, trial_length, num_trials = self.rnn_outputs.shape
    sample_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    test_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    rule_pev = np.zeros((num_neurons,trial_length))
    match_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    sample_stp_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    sample_cat_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    sample_cat_stp_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    test_stp_pev = np.zeros((num_neurons, self.num_rules,trial_length))
    for r in range(self.num_rules):
        if self.ABBA:
            # ABBA: keep only trials with at least 4 test stimuli
            ind = np.where((self.num_test_stim>=4))[0]
        else:
            ind = np.where((self.rule == self.possible_rules[r]))[0]
        # test-direction PEV is measured on non-match trials only
        ind_test = np.where((self.rule == self.possible_rules[r])*(self.match == 0))[0]
        for n in range(num_neurons):
            for t in range(trial_length):
                sample_pev[n,r,t] = self.calc_pev(self.rnn_outputs[n,t,ind], self.sample_dir[ind])
                sample_cat_pev[n,r,t] = self.calc_pev(self.rnn_outputs[n,t,ind], np.floor(self.sample_dir[ind]/(self.num_motion_dirs/2)))
                if not self.ABBA:
                    test_pev[n,r,t] = self.calc_pev(self.rnn_outputs[n,t,ind_test], self.test_dir[ind_test])
                # NOTE(review): rule_pev does not depend on r but is
                # recomputed on every rule iteration (harmless, wasteful)
                rule_pev[n,t] = self.calc_pev(self.rnn_outputs[n,t,:], self.rule)
                match_pev[n,r,t] = self.calc_pev(self.rnn_outputs[n,t,ind], self.match[ind])
                if self.syn_x is not None:
                    # presumably depression x utilization -- verify
                    effective_current = self.syn_x[n,t,ind]*self.syn_u[n,t,ind]
                    sample_stp_pev[n,r,t] = self.calc_pev(effective_current, self.sample_dir[ind])
                    if not self.ABBA:
                        # NOTE(review): likely bug -- effective_current is
                        # built over `ind` but the labels come from
                        # `ind_test`; lengths differ whenever match==1 trials
                        # exist.  Probably should use syn traces at ind_test.
                        # Confirm before changing.
                        test_stp_pev[n,r,t] = self.calc_pev(effective_current, self.test_dir[ind_test])
                    sample_cat_stp_pev[n,r,t] = self.calc_pev(effective_current, np.floor(self.sample_dir[ind]/(self.num_motion_dirs/2)))
    return sample_pev, test_pev, rule_pev, match_pev, sample_stp_pev, sample_cat_pev, sample_cat_stp_pev, test_stp_pev
@staticmethod
def calc_pev(x, conds):
unique_conds = np.unique(conds)
m = len(unique_conds)
lx = len(x)
xr = x - np.mean(x)
xm = np.zeros((1,m))
countx = np.zeros((1,m))
for (j,i) in enumerate(unique_conds):
ind = np.where(conds==i)
countx[0,j] = len(ind[0])
xm[0,j] = np.mean(xr[ind[0]])
gm = np.mean(xr)
df1 = np.sum(countx>0)-1
df2 = lx - df1 - 1
xc = xm - gm
ix = np.where(countx==0)
xc[ix] = 0
RSS = np.dot(countx, np.transpose(xc**2))
#TSS = (xr - gm)**2
TSS = np.dot(np.transpose(xr - gm),xr - gm)
#print(TSS.shape)
SSE = TSS - RSS
if df2 > 0:
mse = SSE/df2
else:
mse = np.NaN
F = (RSS/df1)/mse
"""
Table = np.zeros((3,5))
Table[:,0] = [RSS,SSE,TSS]
Table[:,1] = [df1,df2,df1+df2]
Table[:,2] = [RSS/df1,mse,999];
Table[:,3] = [F,999,999]
"""
SS_groups = RSS;
SS_total = TSS;
df_groups = df1;
MS_error = mse;
pev = (SS_groups-df_groups*MS_error)/(SS_total+MS_error)
if np.isnan(pev):
pev = 0
return pev
def plot_all_figures(self, rule, dt=25, STP=False, DMC=[False], f=None, start_sp=0, num_rows=3, tight=False, two_rules=False, decode_test=False):
    """Summary figure for a DMS-family task: network outputs, neuronal and
    synaptic sample selectivity (PEV) and SVM decoding accuracy.

    Fixes relative to the previous revision:
      * `ax.hold(True)` / `plt.hold(True)` removed -- Axes.hold was deleted
        in matplotlib 3.0 (and had been a no-op since 2.0), so those calls
        now raise AttributeError.
      * `if DMC:` corrected to `if DMC[0]:` -- DMC is a (non-empty) list, so
        the bare truth test was always True and the "category" titles were
        used even for direction tasks; `if DMC[0]:` matches the test used at
        the top of this method.

    Args:
        rule: rule index to plot (ignored where two_rules selects both).
        dt: simulation time step in ms.
        STP: add the synaptic (short-term plasticity) panels.
        DMC: per-rule category-task flags (DMC[0] selects category PEVs).
        f: existing figure to draw into; a new one is created if None.
        start_sp: subplot index offset.
        num_rows, tight, two_rules, decode_test: layout/analysis switches.
    """
    font = {'family' : 'normal',
            'weight' : 'bold',
            'size'   : 12}  # NOTE(review): unused
    mean_resp, mean_out_match, mean_out_non_match = self.calc_mean_responses()
    spike_decode, synapse_decode, spike_decode_test, synapse_decode_test = self.calculate_svms(DMC=DMC,decode_test=decode_test)
    sample_pev, test_pev, rule_pev, _, sample_stp_pev, sample_cat_pev, sample_cat_stp_pev, test_stp_pev = self.calculate_pevs()
    if DMC[0]:
        # category task: show category PEVs and binary chance level
        sample_pev = sample_cat_pev
        sample_stp_pev = sample_cat_stp_pev
        chance_level = 1/2
    else:
        chance_level = 1/8
    if two_rules:
        num_cols = 4
    else:
        num_cols = 3
    # find good example neuron (currently unused below; kept for reference)
    mean_pev = np.mean(sample_pev[:, rule, 30:],axis=1)
    ind = np.argsort(mean_pev)
    example_neuron = ind[-1]
    trial_length_steps = sample_pev.shape[2]
    trial_length = np.int_(trial_length_steps*dt)
    t = np.arange(0,trial_length,dt)
    t -= 900 # assuming 400 ms dead time, 500 ms fixation
    # NOTE(review): both branches are identical; kept for symmetry
    if self.ABBA:
        t0,t1,t2,t3 = np.where(t==-500), np.where(t==0),np.where(t==500),np.where(t==1500)
    else:
        t0,t1,t2,t3 = np.where(t==-500), np.where(t==0),np.where(t==500),np.where(t==1500)
    if f is None:
        f = plt.figure(figsize=(8,2*num_rows))
    ax = f.add_subplot(num_rows, num_cols, start_sp+1)
    if self.ABBA:
        # plot accuracy bar plot instead
        x=np.array([0,1,2,3])
        ax.bar(x+0.1, self.ABBA_accuracy_match,width=0.2,color='r',align='center')
        ax.bar(x-0.1, self.ABBA_accuracy_non_match,width=0.2,color='b',align='center')
        ax.set_title('Accuracy')
        ax.set_ylabel('Fraction correct')
        ax.set_xlabel('Num. of distractors')
    else:
        if two_rules:
            ax.plot(t, mean_out_match[0,0,:] ,'k',linewidth=2,label='Fixation')
            ax.plot(t, mean_out_match[1,0,:] ,'m',linewidth=2,label='Non-match')
            ax.plot(t, mean_out_match[2,0,:] ,'g',linewidth=2,label='Match')
            ax.plot(t, mean_out_match[0,1,:] ,'k--',linewidth=2,label='Fixation')
            ax.plot(t, mean_out_match[1,1,:] ,'m--',linewidth=2,label='Non-match')
            ax.plot(t, mean_out_match[2,1,:] ,'g--',linewidth=2,label='Match')
        else:
            ax.plot(t, mean_out_match[0,rule,:] ,'k',linewidth=2,label='Fixation')
            ax.plot(t, mean_out_match[1,rule,:] ,'m',linewidth=2,label='Non-match')
            ax.plot(t, mean_out_match[2,rule,:] ,'g',linewidth=2,label='Match')
        #plt.legend(loc=3)
        self.add_subplot_fixings(ax)
        ax.set_title('Network output - match trials')
    if self.ABBA:
        pass
    else:
        ax = f.add_subplot(num_rows, num_cols, start_sp+2)
        if two_rules:
            ax.plot(t, mean_out_non_match[0,0,:] ,'k',linewidth=2,label='Fixation')
            ax.plot(t, mean_out_non_match[1,0,:] ,'m',linewidth=2,label='Non-match')
            ax.plot(t, mean_out_non_match[2,0,:] ,'g',linewidth=2,label='Match')
            ax.plot(t, mean_out_non_match[0,1,:] ,'k--',linewidth=2,label='Fixation')
            ax.plot(t, mean_out_non_match[1,1,:] ,'m--',linewidth=2,label='Non-match')
            ax.plot(t, mean_out_non_match[2,1,:] ,'g--',linewidth=2,label='Match')
        else:
            ax.plot(t, mean_out_non_match[0,rule,:] ,'k',linewidth=2)
            ax.plot(t, mean_out_non_match[1,rule,:] ,'m',linewidth=2)
            ax.plot(t, mean_out_non_match[2,rule,:] ,'g',linewidth=2)
        self.add_subplot_fixings(ax)
        ax.set_title('Network output - non-match trials')
    ax = f.add_subplot(num_rows, num_cols, start_sp+3)
    # if plotting the result of the delayed rule task, show rule PEV instead of example neuron
    if two_rules:
        max_val = np.max(rule_pev)
        ax.plot(t,np.mean(rule_pev, axis=0), linewidth=2)
        self.add_subplot_fixings(ax,chance_level=0,ylim=0.2)
        ax.set_title('Rule selectivity')
        ax.set_ylabel('Normalized PEV')
    else:
        """
        max_val = np.max(mean_resp[example_neuron,rule,:,:])
        print(max_val)
        for i in range(8):
            ax.plot(t,mean_resp[example_neuron,rule,i,:],color=[1-i/7,0,i/7], linewidth=1)
        self.add_subplot_fixings(ax,chance_level=0,ylim=max_val*1.05)
        ax.set_title('Example neuron')
        ax.set_ylabel('Activity (a.u.)')
        # plot the mean population response from those neurons whose synapses are informative of sample
        """
        #syn_pev = np.mean(sample_stp_pev[:,0,t2[0]:t3[0]], axis=1)
        #ind_syn = np.where(syn_pev > 0.1)[0]
        #print('Informative synapses ', ind_syn)
        s = np.mean(mean_resp[:,rule,:,:],axis=0)
        max_val = np.max(s)
        for i in range(8):
            ax.plot(t,s[i,:],color=[1-i/7,0,i/7], linewidth=1)
        self.add_subplot_fixings(ax,chance_level=0,ylim=0.5)
        ax.set_title('Mean response from synpases informative neurons')
        ax.set_ylabel('Activity (a.u.)')
        ax.set_ylim([0, 0.5])
    if two_rules:
        ax = f.add_subplot(num_rows, num_cols, start_sp+5)
        im = ax.imshow(sample_pev[:,0,:],aspect='auto',interpolation=None)
        f.colorbar(im,orientation='vertical')
        ax.spines['right'].set_visible(False)
        ax.set_ylabel('Neuron number')
        ax.set_xlabel('Time relative to sample onset (ms)')
        ax.spines['top'].set_visible(False)
        ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
        ax.set_xticklabels([-500,0,500,1500])
        ax.set_title('Neuronal sample \nselectvity - DMS task')
        ax = f.add_subplot(num_rows, num_cols, start_sp+6)
        im = ax.imshow(sample_pev[:,1,:],aspect='auto',interpolation=None)
        f.colorbar(im,orientation='vertical')
        ax.spines['right'].set_visible(False)
        ax.set_ylabel('Neuron number')
        ax.set_xlabel('Time relative to sample onset (ms)')
        ax.spines['top'].set_visible(False)
        ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
        ax.set_xticklabels([-500,0,500,1500])
        ax.set_title('Neuronal sample \nselectvity - DMrS task')
    else:
        ax = f.add_subplot(num_rows, 3, start_sp+4)
        im = ax.imshow(sample_pev[:,rule,:],aspect='auto',interpolation=None)
        f.colorbar(im,orientation='vertical')
        ax.spines['right'].set_visible(False)
        ax.set_ylabel('Neuron number')
        ax.set_xlabel('Time relative to sample onset (ms)')
        ax.spines['top'].set_visible(False)
        if DMC[0]:
            ax.set_title('Neuronal sample \ncategory selectvity')
        else:
            ax.set_title('Neuronal sample selectvity')
        if self.ABBA:
            ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
            ax.set_xticklabels([-500,0,500,1500])
        else:
            ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
            ax.set_xticklabels([-500,0,500,1500])
    if two_rules:
        ax = f.add_subplot(num_rows, num_cols, start_sp+7)
        u = np.mean(sample_pev[:,0,:],axis=0)
        se = np.std(sample_pev[:,0,:],axis=0)/np.sqrt(sample_pev.shape[0])
        ax.plot(t,u,'g')
        sample_max1 = np.max(u)
        ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
        u = np.mean(sample_pev[:,1,:],axis=0)
        se = np.std(sample_pev[:,1,:],axis=0)/np.sqrt(sample_pev.shape[0])
        ax.plot(t,u,'m')
        sample_max = np.max(u)
        sample_max = np.max([sample_max, sample_max1])
        ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
    else:
        ax = f.add_subplot(num_rows, num_cols, start_sp+5)
        u = np.mean(sample_pev[:,rule,:],axis=0)
        se = np.std(sample_pev[:,rule,:],axis=0)/np.sqrt(sample_pev.shape[0])
        ax.plot(t,u,'k')
        sample_max = np.max(u)
        ax.fill_between(t,u-se,u+se,facecolor=(0,0,0,0.5))
    self.add_subplot_fixings(ax,chance_level=0,ylim=sample_max*2)
    if DMC[0]:
        ax.set_title('Neuronal sample \ncategory selectivity')
    else:
        ax.set_title('Neuronal sample selectivity')
    ax.set_ylabel('Normalized PEV')
    if two_rules:
        ax = f.add_subplot(num_rows, num_cols, start_sp+8)
        u = np.mean(spike_decode[:,0,:],axis=1)
        se = np.std(spike_decode[:,0,:],axis=1)
        ax.plot(t,u,'g')
        ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
        u = np.mean(spike_decode[:,1,:],axis=1)
        se = np.std(spike_decode[:,1,:],axis=1)
        ax.plot(t,u,'m')
        ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
        self.add_subplot_fixings(ax, chance_level=chance_level)
    else:
        ax = f.add_subplot(num_rows, num_cols, start_sp+6)
        u = np.mean(spike_decode[:,rule,:],axis=1)
        se = np.std(spike_decode[:,rule,:],axis=1)
        ax.plot(t,u,'k')
        ax.fill_between(t,u-se,u+se,facecolor=(0,0,0,0.5))
        u = np.mean(spike_decode_test[:,rule,:],axis=1)
        se = np.std(spike_decode_test[:,rule,:],axis=1)
        ax.plot(t,u,'c')
        ax.fill_between(t,u-se,u+se,facecolor=(0,1,1,0.5))
        self.add_subplot_fixings(ax, chance_level=chance_level)
    if DMC[0]:
        ax.set_title('Neuronal sample \ncategory decoding')
    else:
        ax.set_title('Neuronal sample decoding')
    ax.set_ylabel('Decoding accuracy')
    # add short term plasticity plots
    if STP:
        if two_rules:
            ax = f.add_subplot(num_rows, num_cols, start_sp+9)
            im = ax.imshow(sample_stp_pev[:,0,:],aspect='auto',interpolation=None)
            f.colorbar(im,orientation='vertical')
            ax.spines['right'].set_visible(False)
            ax.set_ylabel('Neuron number')
            ax.set_xlabel('Time relative to sample onset (ms)')
            ax.spines['top'].set_visible(False)
            ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
            ax.set_xticklabels([-500,0,500,1500])
            ax.set_title('Synaptic sample \nselectvity - DMS task')
            ax = f.add_subplot(num_rows, num_cols, start_sp+10)
            im = ax.imshow(sample_stp_pev[:,1,:],aspect='auto',interpolation=None)
            f.colorbar(im,orientation='vertical')
            ax.spines['right'].set_visible(False)
            ax.set_ylabel('Neuron number')
            ax.set_xlabel('Time relative to sample onset (ms)')
            ax.spines['top'].set_visible(False)
            ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
            ax.set_xticklabels([-500,0,500,1500])
            ax.set_title('Synaptic sample \nselectvity - DMrS task')
        else:
            ax = f.add_subplot(num_rows, 3, start_sp+7)
            im = ax.imshow(sample_stp_pev[:,rule,:],aspect='auto',interpolation=None)
            f.colorbar(im,orientation='vertical')
            ax.spines['right'].set_visible(False)
            ax.set_ylabel('Neuron number')
            ax.set_xlabel('Time relative to sample onset (ms)')
            ax.spines['top'].set_visible(False)
            if DMC[0]:
                ax.set_title('Synaptic sample \ncategory selectvity')
            else:
                ax.set_title('Synaptic sample selectvity')
            if self.ABBA:
                ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
                ax.set_xticklabels([-500,0,500,1500])
            else:
                ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
                ax.set_xticklabels([-500,0,500,1500])
        if two_rules:
            ax = f.add_subplot(num_rows, num_cols, start_sp+11)
            u = np.mean(sample_stp_pev[:,0,:],axis=0)
            se = np.std(sample_stp_pev[:,0,:],axis=0)/np.sqrt(sample_pev.shape[0])
            ax.plot(t,u,'g')
            ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
            u = np.mean(sample_stp_pev[:,1,:],axis=0)
            se = np.std(sample_stp_pev[:,1,:],axis=0)/np.sqrt(sample_pev.shape[0])
            ax.plot(t,u,'m')
            ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
            ax.set_title('Synaptic sample selectivity')
        else:
            ax = f.add_subplot(num_rows, num_cols, start_sp+8)
            u = np.mean(sample_stp_pev[:,rule,:],axis=0)
            se = np.std(sample_stp_pev[:,rule,:],axis=0)/np.sqrt(sample_pev.shape[0])
            ax.plot(t,u,'k')
            ax.fill_between(t,u-se,u+se,facecolor=(0,0,0,0.5))
            if DMC[0]:
                ax.set_title('Synaptic sample \ncategory selectivity')
            else:
                ax.set_title('Synaptic sample selectivity')
        self.add_subplot_fixings(ax,chance_level=0,ylim=sample_max*2)
        ax.set_ylabel('Normalized PEV')
        if two_rules:
            ax = f.add_subplot(num_rows, num_cols, start_sp+12)
            u = np.mean(synapse_decode[:,0,:],axis=1)
            se = np.std(synapse_decode[:,0,:],axis=1)
            ax.plot(t,u,'g')
            ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
            u = np.mean(synapse_decode[:,1,:],axis=1)
            se = np.std(synapse_decode[:,1,:],axis=1)
            ax.plot(t,u,'m')
            ax.fill_between(t,u-se,u+se,facecolor=(0.5,0,0.5))
            ax.set_title('Synaptic sample decoding')
        else:
            ax = f.add_subplot(num_rows, num_cols, start_sp+9)
            u = np.mean(synapse_decode[:,rule,:],axis=1)
            se = np.std(synapse_decode[:,rule,:],axis=1)
            ax.plot(t,u,'k')
            ax.fill_between(t,u-se,u+se,facecolor=(0,0,0,0.5))
            u = np.mean(synapse_decode_test[:,rule,:],axis=1)
            se = np.std(synapse_decode_test[:,rule,:],axis=1)
            ax.plot(t,u,'c')
            ax.fill_between(t,u-se,u+se,facecolor=(0,1,1,0.5))
            if DMC[0]:
                ax.set_title('Synaptic sample \ncategory decoding')
            else:
                ax.set_title('Synaptic sample decoding')
        self.add_subplot_fixings(ax, chance_level=chance_level)
        ax.set_ylabel('Decoding accuracy')
    if tight:
        plt.tight_layout()
    plt.savefig('DMS summary.pdf', format='pdf')
    plt.show()
def plot_postle_figure(self,dt=20, STP=False, tight=False):
    """Plot Postle-task decoding accuracy and save 'postle summary.pdf'.

    Side effect: sets self.postle = True, which switches the epoch markers
    drawn by add_subplot_fixings.

    Args:
        dt: simulation time step in ms (the trial is assumed to be 220 steps).
        STP: also plot decoding from the synaptic traces.
        tight: unused here -- tight_layout is always applied.  # NOTE(review)
    Returns:
        (sample_decoding, sample_stp_decoding)
    """
    # declare that we're analyzing a postle task
    self.postle = True
    sample_pev, sample_stp_pev, sample_decoding, sample_stp_decoding, model_output = self.decoding_accuracy_postle(num_reps=10)
    t = np.arange(0,220*dt,dt)
    t -= 900 # assuming 400 ms dead time, 500 ms fixation
    # NOTE(review): t0..t6 are computed but never used in this method
    t0,t1,t2,t3,t4,t5,t6 = np.where(t==-500), np.where(t==0), np.where(t==500), np.where(t==1000), np.where(t==1500), np.where(t==2000), np.where(t==2500)
    f = plt.figure(figsize=(6,6))
    # top 2x2 grid: decoding by (first cue, second cue), probe-absent trials,
    # one trace per modality
    for i in range(2):
        for j in range(2):
            ax = f.add_subplot(3, 2, 2*i+j+1)
            u = np.mean(sample_decoding[i,j,0,0,:,:],axis=1)
            se = np.std(sample_decoding[i,j,0,0,:,:],axis=1)
            ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
            ax.plot(t,u,'g')
            u = np.mean(sample_decoding[i,j,0,1,:,:],axis=1)
            se = np.std(sample_decoding[i,j,0,1,:,:],axis=1)
            ax.fill_between(t,u-se,u+se,facecolor=(1,0.6,0,0.5))
            ax.plot(t,u,color=[1,0.6,0])
            if STP:
                u = np.mean(sample_stp_decoding[i,j,0,0,:,:],axis=1)
                se = np.std(sample_stp_decoding[i,j,0,0,:,:],axis=1)
                ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
                ax.plot(t,u,'m')
                u = np.mean(sample_stp_decoding[i,j,0,1,:,:],axis=1)
                se = np.std(sample_stp_decoding[i,j,0,1,:,:],axis=1)
                ax.fill_between(t,u-se,u+se,facecolor=(0,1,1,0.5))
                ax.plot(t,u,'c')
            self.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1)
            ax.set_ylabel('Decoding accuracy')
    # bottom-left: probe effect for one cue configuration
    ax = f.add_subplot(3, 2, 5)
    u = np.mean(sample_decoding[0,0,1,1,:,:],axis=1)
    se = np.std(sample_decoding[0,0,1,1,:,:],axis=1)
    #u = np.mean(np.mean(sample_decoding[0,:,1,1,:,:],axis=0),axis=1)
    #se = np.std(np.mean(sample_decoding[0,:,1,1,:,:],axis=0),axis=1)
    ax.fill_between(t,u-se,u+se,facecolor=(0,0,0,0.5))
    ax.plot(t,u,'k')
    u = np.mean(sample_decoding[0,0,0,1,:,:],axis=1)
    se = np.std(sample_decoding[0,0,0,1,:,:],axis=1)
    #u = np.mean(np.mean(sample_decoding[0,:,0,1,:,:],axis=0),axis=1)
    #se = np.std(np.mean(sample_decoding[0,:,0,1,:,:],axis=0),axis=1)
    ax.fill_between(t,u-se,u+se,facecolor=(1,0.6,0,0.5))
    ax.plot(t,u,color=[1,0.6,0])
    self.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1)
    ax.plot([2400,2400],[-2, 99],'y--')
    ax.set_ylabel('Decoding accuracy')
    # bottom-right: probe effect for the complementary cue configuration
    ax = f.add_subplot(3, 2, 6)
    u = np.mean(sample_decoding[1,1,1,0,:,:],axis=1)
    se = np.std(sample_decoding[1,1,1,0,:,:],axis=1)
    #u = np.mean(np.mean(sample_decoding[1,:,1,0,:,:],axis=0),axis=1)
    #se = np.std(np.mean(sample_decoding[1,:,1,0,:,:],axis=0),axis=1)
    ax.fill_between(t,u-se,u+se,facecolor=(0,0,0,0.5))
    ax.plot(t,u,'k')
    u = np.mean(sample_decoding[1,1,0,0,:,:],axis=1)
    se = np.std(sample_decoding[1,1,0,0,:,:],axis=1)
    #u = np.mean(np.mean(sample_decoding[1,:,0,0,:,:],axis=0),axis=1)
    #se = np.std(np.mean(sample_decoding[1,:,0,0,:,:],axis=0),axis=1)
    ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
    ax.plot(t,u,'g')
    self.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1)
    ax.plot([2400,2400],[-2, 99],'y--')
    ax.set_ylabel('Decoding accuracy')
    plt.tight_layout()
    plt.savefig('postle summary.pdf', format='pdf')
    plt.show()
    return sample_decoding, sample_stp_decoding
def plot_ABBA_figures(self, dt=25, STP=False, tight=False):
    """Summary figure for the ABBA task; saves 'ABBA summary.pdf'.

    Fixes relative to the previous revision:
      * `ax.hold(True)` removed -- Axes.hold was deleted in matplotlib 3.0
        and the call now raises AttributeError.
      * unused locals `chance_level` and `t0..t3` removed.

    Args:
        dt: simulation time step in ms.
        STP: unused here (kept for interface compatibility).  # NOTE(review)
        tight: apply tight_layout before saving.
    """
    mean_resp, mean_out_match, mean_out_non_match = self.calc_mean_responses()
    spike_decode, synapse_decode, spike_decode_test, synapse_decode_test = self.calculate_svms(DMC=[False],decode_test=True)
    #sample_pev, test_pev, rule_pev, _, sample_stp_pev, sample_cat_pev, sample_cat_stp_pev, test_stp_pev = self.calculate_pevs()
    trial_length_steps = self.rnn_outputs.shape[1]
    trial_length = np.int_(trial_length_steps*dt)
    t = np.arange(0,trial_length,dt)
    t -= 900 # assuming 400 ms dead time, 500 ms fixation
    f = plt.figure(figsize=(6,4))
    ax = f.add_subplot(2, 2, 1)
    # plot accuracy bars
    x=np.array([0,1,2,3])
    ax.bar(x+0.1, self.ABBA_accuracy_match,width=0.2,color='r',align='center')
    ax.bar(x-0.1, self.ABBA_accuracy_non_match,width=0.2,color='b',align='center')
    ax.set_title('Accuracy')
    ax.set_ylabel('Fraction correct')
    ax.set_xlabel('Num. of distractors')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax = f.add_subplot(2, 2, 2)
    # mean population response, one trace per sample direction
    s = np.mean(mean_resp[:,0,:,:],axis=0)
    max_val = np.max(s)
    for i in range(8):
        ax.plot(t,s[i,:],color=[1-i/7,0,i/7], linewidth=1)
    self.add_subplot_fixings(ax,chance_level=0,ylim=0.5)
    ax.set_title('Mean response from synpases informative neurons')
    ax.set_ylabel('Activity (a.u.)')
    ax.set_ylim([0, 0.5])
    ax = f.add_subplot(2, 2, 3)
    # spiking decoding: sample (blue) vs test (red)
    u = np.mean(spike_decode[:,0,:],axis=1)
    se = np.std(spike_decode[:,0,:],axis=1)
    ax.plot(t,u,'b')
    ax.fill_between(t,u-se,u+se,facecolor=(0,0,1,0.5))
    u = np.mean(spike_decode_test[:,0,:],axis=1)
    se = np.std(spike_decode_test[:,0,:],axis=1)
    ax.plot(t,u,'r')
    ax.fill_between(t,u-se,u+se,facecolor=(1,0,0,0.5))
    self.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1)
    ax.set_ylabel('Decoding accuracy')
    ax = f.add_subplot(2, 2, 4)
    # spiking sample decoding (blue) vs synaptic test decoding (red)
    u = np.mean(spike_decode[:,0,:],axis=1)
    se = np.std(spike_decode[:,0,:],axis=1)
    ax.plot(t,u,'b')
    ax.fill_between(t,u-se,u+se,facecolor=(0,0,1,0.5))
    u = np.mean(synapse_decode_test[:,0,:],axis=1)
    se = np.std(synapse_decode_test[:,0,:],axis=1)
    ax.plot(t,u,'r')
    ax.fill_between(t,u-se,u+se,facecolor=(1,0,0,0.5))
    self.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1)
    ax.set_ylabel('Decoding accuracy')
    if tight:
        plt.tight_layout()
    plt.savefig('ABBA summary.pdf', format='pdf')
    plt.show()
def add_subplot_fixings(self, ax, chance_level=0, ylim=1.1, delayed_rule=False, ABBA_task=None):
    """Apply shared cosmetics to a subplot: epoch markers, chance line,
    axis limits, labels and despining.

    Fix: `compare_two_tasks` calls this method with an `ABBA_task=...`
    keyword, which the previous signature rejected with a TypeError.  The
    new optional parameter overrides self.ABBA when given; omitting it
    preserves the old behavior exactly.

    Args:
        ax: matplotlib Axes to decorate.
        chance_level: y-value of the horizontal dashed chance line.
        ylim: upper y-limit (lower limit is fixed at -0.1).
        delayed_rule: add an extra epoch marker at t = 1000 ms.
        ABBA_task: optional bool; when not None it overrides self.ABBA for
            choosing the epoch-marker layout.
    """
    abba = self.ABBA if ABBA_task is None else ABBA_task
    ax.plot([0,0],[-2, 99],'k--')
    if abba:
        ax.plot([500,500],[-2, 99],'k--')
        ax.plot([1000,1000],[-2, 99],'k--')
        ax.plot([1500,1500],[-2, 99],'k--')
        ax.plot([2000,2000],[-2, 99],'k--')
        ax.set_xlim([-500,2500])
        ax.set_xticks([-500,0,500,1000,1500,2000,2500])
    elif self.postle:
        ax.plot([500,500],[-2, 99],'k--')
        ax.plot([1000,1000],[-2, 99],'k--')
        ax.plot([1500,1500],[-2, 99],'k--')
        ax.plot([2000,2000],[-2, 99],'k--')
        ax.plot([2500,2500],[-2, 99],'k--')
        ax.plot([3000,3000],[-2, 99],'k--')
        ax.set_xlim([-500,3500])
        ax.set_xticks([-500,0,500,1000,1500,2000,2500,3000])
    else:
        ax.plot([500,500],[-2, 99],'k--')
        ax.plot([1500,1500],[-2, 99],'k--')
        ax.set_xlim([-500,2000])
        ax.set_xticks([-500,0,500,1500])
        if delayed_rule:
            ax.set_xticks([-500,0,500,1000,1500])
            ax.plot([1000,1000],[-2, 99],'k--')
    # horizontal chance-level reference line
    ax.plot([-700,3600],[chance_level, chance_level],'k--')
    ax.set_ylim([-0.1, ylim])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    #ax.set_title('Tuning similarity between proximal and distal neurons')
    ax.set_ylabel('Response')
    ax.set_xlabel('Time relative to sample onset (ms)')
def compare_two_tasks(fn1, fn2, DMC=False, ABBA_flag=False,rule = 0, dt=25):
# enter the two filenames, fn1 and fn2
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
na1 = neural_analysis(fn1, ABBA = ABBA_flag)
na2 = neural_analysis(fn2, ABBA = ABBA_flag)
mean_resp1, _, _ = na1.calc_mean_responses()
svm_results1 = na1.calculate_svms(ABBA = ABBA_flag)
sample_pev1, _, rule_pev1, _, sample_stp_pev1, _ , sample_cat_pev1, sample_cat_stp_pev1 = na1.calculate_pevs(ABBA = ABBA_flag)
mean_resp2, _, _ = na2.calc_mean_responses()
svm_results2 = na2.calculate_svms()
sample_pev2, _, rule_pev2, _, sample_stp_pev2, _ ,sample_cat_pev2, sample_cat_stp_pev2 = na2.calculate_pevs()
if DMC:
sample_pev1 = sample_cat_pev1
sample_pev2 = sample_cat_pev2
sample_stp_pev1 = sample_cat_stp_pev1
sample_stp_pev2 = sample_cat_stp_pev2
svm_results1['sample_full'] = svm_results1['sample_full_cat']
svm_results2['sample_full'] = svm_results2['sample_full_cat']
svm_results1['sample_full_stp'] = svm_results1['sample_full_cat_stp']
svm_results2['sample_full_stp'] = svm_results2['sample_full_cat_stp']
if na1.num_rules>1 and False:
# not sure if I want this. If there are more than one task rules, this part will average
# across all rules
sample_pev1[:,0,:] = np.mean(sample_cat_pev1,axis=1)
sample_pev2[:,0,:] = np.mean(sample_cat_pev2,axis=1)
sample_stp_pev1[:,0,:] = np.mean(sample_cat_stp_pev1,axis=1)
sample_stp_pev2[:,0,:] = np.mean(sample_cat_stp_pev2,axis=1)
svm_results1['sample_full'][:,0,:] = np.mean(svm_results1['sample_full'],axis=1)
svm_results2['sample_full'][:,0,:] = np.mean(svm_results2['sample_full'],axis=1)
svm_results1['sample_full_stp'][:,0,:] = np.mean(svm_results1['sample_full_stp'],axis=1)
svm_results2['sample_full_stp'][:,0,:] = np.mean(svm_results2['sample_full_stp'],axis=1)
rule = 0
f = plt.figure(figsize=(8,4))
t = np.arange(0,2700,dt)
t -= 900
t0,t1,t2,t3 = np.where(t==-500), np.where(t==0),np.where(t==500),np.where(t==1500)
if ABBA_flag:
t = np.arange(0,200+500+1500+300+300,dt)
t = np.arange(0,200+500+250+2000,dt)
t -= 700
t0,t1,t2,t3,t4,t5,t6 = np.where(t==-500), np.where(t==0),np.where(t==500),np.where(t==1000),np.where(t==1500),np.where(t==2000), np.where(t==2500)
ax = f.add_subplot(2, 3, 1)
ax.hold(True)
u1 = np.mean(np.mean(mean_resp1[:,rule,:,:],axis=1),axis=0)
u2 = np.mean(np.mean(mean_resp2[:,rule,:,:],axis=1),axis=0)
se1 = np.std(np.mean(mean_resp1[:,rule,:,:],axis=1),axis=0)/np.sqrt(mean_resp1.shape[0])
se2 = np.std(np.mean(mean_resp2[:,rule,:,:],axis=1),axis=0)/np.sqrt(mean_resp1.shape[0])
ax.fill_between(t,u1-se1,u1+se1,facecolor=(0,1,0))
ax.fill_between(t,u2-se2,u2+se2,facecolor=(1,0,1))
ax.plot(t,u1,'g',label='without STP',color=(0,0.5,0),linewidth=2)
ax.plot(t,u2,'m',label='with STP',color=(0.5,0,0.5),linewidth=2)
na1.add_subplot_fixings(ax, chance_level=0, ylim=6, ABBA_task=ABBA_flag)
green_patch = mpatches.Patch(color='green', label='without STP')
magenta_patch = mpatches.Patch(color='magenta', label='with STP')
plt.legend(loc=0, handles=[green_patch,magenta_patch],prop={'size':6})
ax.set_title('Mean population response')
ax.set_ylabel('Mean response')
ax = f.add_subplot(2, 3, 2)
ax.hold(True)
u1 = np.mean(sample_pev1[:,rule,:],axis=0)
u2 = np.mean(sample_pev2[:,rule,:],axis=0)
u3 = np.mean(rule_pev2,axis=0)
se1 = np.std(sample_pev1[:,rule,:],axis=0)/np.sqrt(sample_pev1.shape[0])
se2 = np.std(sample_pev2[:,rule,:],axis=0)/np.sqrt(sample_pev1.shape[0])
ax.fill_between(t,u1-se1,u1+se1,facecolor=(0,1,0))
ax.fill_between(t,u2-se2,u2+se2,facecolor=(1,0,1))
ax.plot(t,u1,'g',label='without STP',color=(0,0.5,0),linewidth=2)
ax.plot(t,u2,'m',label='with STP',color=(0.5,0,0.5),linewidth=2)
ax.plot(t,u3,label='rule with STP',color=(0,0,0),linewidth=2)
na1.add_subplot_fixings(ax, chance_level=0, ylim=0.35,ABBA_task=ABBA_flag)
#green_patch = mpatches.Patch(color='green', label='without STP')
#magenta_patch = mpatches.Patch(color='magenta', label='with STP')
#plt.legend(loc=0, handles=[green_patch,magenta_patch])
ax.set_title('Neuron sample selectivity')
ax.set_ylabel('Normalized PEV')
ax = f.add_subplot(2, 3, 3)
ax.hold(True)
u1 = np.mean(svm_results1['sample_full'][:,rule,:],axis=1)
u2 = np.mean(svm_results2['sample_full'][:,rule,:],axis=1)
se1 = np.std(svm_results1['sample_full'][:,rule,:],axis=1)
se2 = np.std(svm_results2['sample_full'][:,rule,:],axis=1)
se1[np.isnan(se1)] = 0
se2[np.isnan(se2)] = 0
ax.fill_between(t,u1-se1,u1+se1,facecolor=(0,1,0))
ax.fill_between(t,u2-se2,u2+se2,facecolor=(1,0,1))
ax.plot(t,u1,'g',label='without STP',color=(0,0.5,0),linewidth=2)
ax.plot(t,u2,'m',label='with STP',color=(0.5,0,0.5),linewidth=2)
na1.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1,ABBA_task=ABBA_flag)
#green_patch = mpatches.Patch(color='green', label='without STP')
#magenta_patch = mpatches.Patch(color='magenta', label='with STP')
#plt.legend(loc=0, handles=[green_patch,magenta_patch])
ax.set_title('Neuron sample decoding accuracy')
ax.set_ylabel('Decoding accuracy')
ax = f.add_subplot(2, 3, 4)
im = ax.imshow(sample_stp_pev2[:,rule,:],aspect='auto',interpolation=None)
if not ABBA_flag:
ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
ax.set_xticklabels([-500,0,500,1500])
else:
ax.set_xticks([t0[0], t1[0], t2[0], t3[0], t4[0], t5[0], t6[0]])
ax.set_xticklabels([-500,0,500,1000,1500,2000,2000,2500])
f.colorbar(im,orientation='vertical')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Neuron number')
ax.set_xlabel('Time relative to sample onset (ms)')
ax.set_title('Synaptic sample selectivity')
ax = f.add_subplot(2, 3, 5)
ax.hold(True)
u2 = np.mean(sample_stp_pev2[:,rule,:],axis=0)
se2 = np.std(sample_stp_pev2[:,rule,:],axis=0)/np.sqrt(sample_pev1.shape[0])
ax.fill_between(t,u2-se2,u2+se2,facecolor=(1,0,1))
ax.plot(t,u2,'m',label='with STP',color=(0.5,0,0.5),linewidth=2)
na1.add_subplot_fixings(ax, chance_level=0, ylim=0.3,ABBA_task=ABBA_flag)
#green_patch = mpatches.Patch(color='green', label='without STP')
#magenta_patch = mpatches.Patch(color='magenta', label='with STP')
#plt.legend(loc=0, handles=[green_patch,magenta_patch])
ax.set_title('Synaptic sample selectivity')
ax.set_ylabel('Normalized PEV')
ax = f.add_subplot(2, 3, 6)
ax.hold(True)
u2 = np.mean(svm_results2['sample_full_stp'][:,rule,:],axis=1)
se2 = np.std(svm_results2['sample_full_stp'][:,rule,:],axis=1)
se2[np.isnan(se2)] = 0
ax.fill_between(t,u2-se2,u2+se2,facecolor=(1,0,1))
ax.plot(t,u2,'m',label='with STP',color=(0.5,0,0.5),linewidth=2)
na1.add_subplot_fixings(ax, chance_level=1/8, ylim=1.1,ABBA_task=ABBA_flag)
#green_patch = mpatches.Patch(color='green', label='without STP')
#magenta_patch = mpatches.Patch(color='magenta', label='with STP')
#plt.legend(loc=0, handles=[green_patch,magenta_patch])
ax.set_title('Synaptic sample decoding accuray')
ax.set_ylabel('Decoding accuracy')
plt.tight_layout()
plt.savefig('DMS comparison.pdf', format='pdf')
plt.show()
def compare_two_tasks_two_rules(fn1, fn2, DMC=False, ABBA=False, dt=25):
    """Compare two trained networks on a two-rule task in a 5x2 figure.

    Loads the two saved models (fn1 assumed trained WITHOUT STP, fn2 WITH
    STP) and plots: mean population response, neuronal rule selectivity,
    and -- per rule -- neuronal sample selectivity (PEV), neuronal sample
    decoding, synaptic sample selectivity, and synaptic sample decoding.
    Saves the figure to 'Two rules comparison.pdf'.

    Parameters
    ----------
    fn1, fn2 : str
        Paths to the saved model files (fn1 without STP, fn2 with STP).
    DMC : bool
        If True, substitute the category-based PEV/decoding results.
    ABBA : bool
        If True, use the ABBA-task trial timing for the time axis.
    dt : int
        Simulation time step in ms (default 25).
    """
    # BUG FIX: removed all `ax.hold(True)` calls -- Axes.hold was deprecated
    # in matplotlib 2.0 and removed in 3.0 (overlaying is the default, so no
    # replacement is needed). Also removed the unused local `font` dict.
    na1 = neural_analysis(fn1)
    na2 = neural_analysis(fn2)
    mean_resp1, _, _ = na1.calc_mean_responses()
    svm_results1 = na1.calculate_svms()
    sample_pev1, _, rule_pev1, _, sample_stp_pev1, _, sample_cat_pev1, sample_cat_stp_pev1 = na1.calculate_pevs()
    mean_resp2, _, _ = na2.calc_mean_responses()
    svm_results2 = na2.calculate_svms()
    sample_pev2, _, rule_pev2, _, sample_stp_pev2, _, sample_cat_pev2, sample_cat_stp_pev2 = na2.calculate_pevs()
    if DMC:
        # For the category task, analyze category selectivity/decoding instead.
        sample_pev1 = sample_cat_pev1
        sample_pev2 = sample_cat_pev2
        sample_stp_pev1 = sample_cat_stp_pev1
        sample_stp_pev2 = sample_cat_stp_pev2
        svm_results1['sample_full'] = svm_results1['sample_full_cat']
        svm_results2['sample_full'] = svm_results2['sample_full_cat']
        svm_results1['sample_full_stp'] = svm_results1['sample_full_cat_stp']
        svm_results2['sample_full_stp'] = svm_results2['sample_full_cat_stp']
    f = plt.figure(figsize=(8, 9))
    # Time axis in ms relative to sample onset; t0..t3 (or t0..t7 for ABBA)
    # are index tuples of key trial events.
    t = np.arange(0, 2700, dt)
    t -= 900
    t0, t1, t2, t3 = np.where(t == -500), np.where(t == 0), np.where(t == 500), np.where(t == 1500)
    if ABBA:
        # ABBA task uses a different trial structure/timing.
        t = np.arange(0, 200 + 500 + 1500 + 300 + 300, dt)
        t -= 700
        t0, t1, t2, t3, t4, t5, t6, t7 = np.where(t == -500), np.where(t == 0), np.where(t == 300), np.where(t == 600), np.where(t == 900), np.where(t == 1200), np.where(t == 1500), np.where(t == 1800)
    # Mean population response, averaged across rules and trials.
    ax = f.add_subplot(5, 2, 1)
    u1 = np.mean(np.mean(np.mean(mean_resp1[:, :, :, :], axis=1), axis=1), axis=0)
    u2 = np.mean(np.mean(np.mean(mean_resp2[:, :, :, :], axis=1), axis=1), axis=0)
    se1 = np.std(np.mean(np.mean(mean_resp1[:, :, :, :], axis=1), axis=1), axis=0) / np.sqrt(mean_resp1.shape[0])
    se2 = np.std(np.mean(np.mean(mean_resp2[:, :, :, :], axis=1), axis=1), axis=0) / np.sqrt(mean_resp1.shape[0])
    ax.fill_between(t, u1 - se1, u1 + se1, facecolor=(0, 1, 0))
    ax.fill_between(t, u2 - se2, u2 + se2, facecolor=(1, 0, 1))
    ax.plot(t, u1, 'g', label='without STP', color=(0, 0.5, 0), linewidth=2)
    ax.plot(t, u2, 'm', label='with STP', color=(0.5, 0, 0.5), linewidth=2)
    na1.add_subplot_fixings(ax, chance_level=0, ylim=4, ABBA_task=ABBA, delayed_rule=True)
    green_patch = mpatches.Patch(color='green', label='without STP')
    magenta_patch = mpatches.Patch(color='magenta', label='with STP')
    plt.legend(loc=0, handles=[green_patch, magenta_patch], prop={'size': 6})
    ax.set_title('Mean population response')
    ax.set_ylabel('Mean response')
    # Neuronal rule selectivity.
    ax = f.add_subplot(5, 2, 2)
    u1 = np.mean(rule_pev1, axis=0)
    u2 = np.mean(rule_pev2, axis=0)
    se1 = np.std(rule_pev1, axis=0) / np.sqrt(sample_pev1.shape[0])
    se2 = np.std(rule_pev2, axis=0) / np.sqrt(sample_pev1.shape[0])
    ax.fill_between(t, u1 - se1, u1 + se1, facecolor=(0, 1, 0))
    ax.fill_between(t, u2 - se2, u2 + se2, facecolor=(1, 0, 1))
    ax.plot(t, u1, 'g', label='without STP', color=(0, 0.5, 0), linewidth=2)
    ax.plot(t, u2, 'm', label='with STP', color=(0.5, 0, 0.5), linewidth=2)
    na1.add_subplot_fixings(ax, chance_level=0, ylim=0.3, ABBA_task=ABBA, delayed_rule=True)
    ax.set_title('Neuron rule selectivity')
    ax.set_ylabel('Normalized PEV')
    # Per-rule panels: neuronal selectivity/decoding (rows 2-3),
    # synaptic selectivity/decoding (rows 4-5).
    for rule in range(2):
        # Neuronal sample selectivity for this rule.
        ax = f.add_subplot(5, 2, 3 + 2 * rule)
        u1 = np.mean(sample_pev1[:, rule, :], axis=0)
        u2 = np.mean(sample_pev2[:, rule, :], axis=0)
        se1 = np.std(sample_pev1[:, rule, :], axis=0) / np.sqrt(sample_pev1.shape[0])
        se2 = np.std(sample_pev2[:, rule, :], axis=0) / np.sqrt(sample_pev1.shape[0])
        ax.fill_between(t, u1 - se1, u1 + se1, facecolor=(0, 1, 0))
        ax.fill_between(t, u2 - se2, u2 + se2, facecolor=(1, 0, 1))
        ax.plot(t, u1, 'g', label='without STP', color=(0, 0.5, 0), linewidth=2)
        ax.plot(t, u2, 'm', label='with STP', color=(0.5, 0, 0.5), linewidth=2)
        na1.add_subplot_fixings(ax, chance_level=0, ylim=0.7, ABBA_task=ABBA, delayed_rule=True)
        ax.set_title('Neuron sample selectivity')
        ax.set_ylabel('Normalized PEV')
        # Neuronal sample decoding accuracy (s.d. across SVM repetitions).
        ax = f.add_subplot(5, 2, 4 + 2 * rule)
        u1 = np.mean(svm_results1['sample_full'][:, rule, :], axis=1)
        u2 = np.mean(svm_results2['sample_full'][:, rule, :], axis=1)
        se1 = np.std(svm_results1['sample_full'][:, rule, :], axis=1)
        se2 = np.std(svm_results2['sample_full'][:, rule, :], axis=1)
        # NaNs can appear at time points with no decodable data; plot them as 0 spread.
        se1[np.isnan(se1)] = 0
        se2[np.isnan(se2)] = 0
        ax.fill_between(t, u1 - se1, u1 + se1, facecolor=(0, 1, 0))
        ax.fill_between(t, u2 - se2, u2 + se2, facecolor=(1, 0, 1))
        ax.plot(t, u1, 'g', label='without STP', color=(0, 0.5, 0), linewidth=2)
        ax.plot(t, u2, 'm', label='with STP', color=(0.5, 0, 0.5), linewidth=2)
        na1.add_subplot_fixings(ax, chance_level=1 / 8, ylim=1.1, ABBA_task=ABBA, delayed_rule=True)
        ax.set_title('Neuron sample decoding accuracy')
        ax.set_ylabel('Decoding accuracy')
        # Synaptic sample selectivity.
        ax = f.add_subplot(5, 2, 7 + 2 * rule)
        u1 = np.mean(sample_stp_pev1[:, rule, :], axis=0)
        u2 = np.mean(sample_stp_pev2[:, rule, :], axis=0)
        se1 = np.std(sample_stp_pev1[:, rule, :], axis=0) / np.sqrt(sample_pev1.shape[0])
        se2 = np.std(sample_stp_pev2[:, rule, :], axis=0) / np.sqrt(sample_pev1.shape[0])
        ax.fill_between(t, u1 - se1, u1 + se1, facecolor=(0, 1, 0))
        ax.fill_between(t, u2 - se2, u2 + se2, facecolor=(1, 0, 1))
        ax.plot(t, u1, 'g', label='without STP', color=(0, 0.5, 0), linewidth=2)
        ax.plot(t, u2, 'm', label='with STP', color=(0.5, 0, 0.5), linewidth=2)
        na1.add_subplot_fixings(ax, chance_level=0, ylim=0.7, ABBA_task=ABBA, delayed_rule=True)
        ax.set_title('Synaptic sample selectivity')
        ax.set_ylabel('Normalized PEV')
        # Synaptic sample decoding accuracy.
        ax = f.add_subplot(5, 2, 8 + 2 * rule)
        u1 = np.mean(svm_results1['sample_full_stp'][:, rule, :], axis=1)
        u2 = np.mean(svm_results2['sample_full_stp'][:, rule, :], axis=1)
        se1 = np.std(svm_results1['sample_full_stp'][:, rule, :], axis=1)
        se2 = np.std(svm_results2['sample_full_stp'][:, rule, :], axis=1)
        se1[np.isnan(se1)] = 0
        se2[np.isnan(se2)] = 0
        ax.fill_between(t, u1 - se1, u1 + se1, facecolor=(0, 1, 0))
        ax.fill_between(t, u2 - se2, u2 + se2, facecolor=(1, 0, 1))
        ax.plot(t, u1, 'g', label='without STP', color=(0, 0.5, 0), linewidth=2)
        ax.plot(t, u2, 'm', label='with STP', color=(0.5, 0, 0.5), linewidth=2)
        na1.add_subplot_fixings(ax, chance_level=1 / 8, ylim=1.1, ABBA_task=ABBA, delayed_rule=True)
        ax.set_title('Synaptic sample decoding accuracy')
        ax.set_ylabel('Decoding accuracy')
    plt.tight_layout()
    plt.savefig('Two rules comparison.pdf', format='pdf')
    plt.show()
def plt_dual_figures(fn1, fn2, ABBA=False, DMC=False, two_rules=False):
    """Plot the full figure set for a non-STP and an STP model side by side.

    fn1 is assumed to be the model trained without STP, fn2 the one with STP.
    Both are rendered into one shared figure; fn2's panels start at subplot
    offset `sp` and the final call triggers the tight layout.
    """
    if two_rules:
        fig_handle = plt.figure(figsize=(10, 10))
        sp = 8
    else:
        fig_handle = plt.figure(figsize=(8, 10))
        sp = 6
    # (filename, has STP, subplot offset); the STP pass also tightens the layout.
    for model_fn, has_stp, offset in ((fn1, False, 0), (fn2, True, sp)):
        na = neural_analysis(model_fn, ABBA=ABBA)
        na.plot_all_figures(rule=0, STP=has_stp, ABBA=ABBA, DMC=DMC,
                            two_rules=two_rules, f=fig_handle,
                            start_sp=offset, num_rows=5, tight=has_stp)
def plot_summary_decoding_figure():
    """Build the five-row summary figure of decoding results.

    Each row compares one task variant's non-STP model against its STP
    counterpart via `plot_decoding_pairs`. The figure is saved to
    'Summary.pdf' and shown.
    """
    base = 'C:/Users/nicol_000/Projects/RNN STP Model/saved_model_files/'
    # (no-STP file, STP file, DMC, ABBA, two_rules) for each task variant.
    model_pairs = [
        ('DMS.pkl', 'DMS_std_stf.pkl', False, False, False),
        ('DMC.pkl', 'DMC_std_stf.pkl', True, False, False),
        ('DMS_rotation.pkl', 'DMS_rotate_std_stf_v3.pkl', False, False, False),
        ('DMS_and_rotate_v3.pkl', 'DMS_and_rotate_std_stf_v3.pkl', False, False, True),
        ('ABBA.pkl', 'ABBA_std_stf_v2.pkl', False, True, False),
    ]
    fig_handle = plt.figure(figsize=(6, 10))
    num_rows = 5
    for row, (file_a, file_b, dmc, abba, two_rules) in enumerate(model_pairs):
        plot_decoding_pairs(base + file_a, base + file_b, fig_handle,
                            num_rows=num_rows, start_sp=2 * row,
                            DMC=dmc, ABBA=abba, two_rules=two_rules)
    plt.tight_layout()
    plt.savefig('Summary.pdf', format='pdf')
    plt.show()
def plot_decoding_pairs(fn1, fn2, f, num_rows, start_sp, DMC=False, ABBA=False, two_rules=False):
    """Add one row of sample-decoding panels comparing two networks.

    Left panel: neuronal sample decoding for both networks (green = fn1,
    without STP; magenta = fn2, with STP). Right panel: synaptic sample
    decoding for the STP network only.

    Parameters
    ----------
    fn1, fn2 : str
        Saved model files (fn1 without STP, fn2 with STP).
    f : matplotlib.figure.Figure
        Figure to draw into.
    num_rows : int
        Number of rows in the subplot grid.
    start_sp : int
        Subplot offset of this row (two columns per row).
    DMC : bool
        Decode sample category (chance 1/2) instead of identity (chance 1/8).
    ABBA : bool
        Load the networks with ABBA-task settings.
    two_rules : bool
        Average decoding over both rules instead of using only rule 0.
    """
    dt = 25
    # BUG FIX: ABBA was previously hard-coded to False in both
    # neural_analysis() calls, silently ignoring the caller's flag.
    # (Leftover debug prints were also removed.)
    na = neural_analysis(fn1, ABBA=ABBA)
    svm_results1 = na.calculate_svms()
    na = neural_analysis(fn2, ABBA=ABBA)
    svm_results2 = na.calculate_svms()
    trial_length_steps = svm_results1['sample_full'].shape[0]
    trial_length = np.int_(trial_length_steps * dt)
    t = np.arange(0, trial_length, dt)
    t -= 900  # assuming 400 ms dead time, 500 ms fixation
    if DMC:
        # Substitute the category-decoding results.
        svm_results1['sample_full'] = svm_results1['sample_full_cat']
        svm_results2['sample_full'] = svm_results2['sample_full_cat']
        svm_results1['sample_full_stp'] = svm_results1['sample_full_cat_stp']
        svm_results2['sample_full_stp'] = svm_results2['sample_full_cat_stp']
    if two_rules:
        # Average decoding accuracy across the two rules.
        svm_results1['sample_full'] = np.mean(svm_results1['sample_full'], axis=1)
        svm_results2['sample_full'] = np.mean(svm_results2['sample_full'], axis=1)
        svm_results1['sample_full_stp'] = np.mean(svm_results1['sample_full_stp'], axis=1)
        svm_results2['sample_full_stp'] = np.mean(svm_results2['sample_full_stp'], axis=1)
    else:
        # Keep rule 0 only.
        svm_results1['sample_full'] = np.squeeze(svm_results1['sample_full'][:, 0, :])
        svm_results2['sample_full'] = np.squeeze(svm_results2['sample_full'][:, 0, :])
        svm_results1['sample_full_stp'] = np.squeeze(svm_results1['sample_full_stp'][:, 0, :])
        svm_results2['sample_full_stp'] = np.squeeze(svm_results2['sample_full_stp'][:, 0, :])
    # Left panel: neuronal decoding for both networks (mean +/- s.d. over reps).
    ax = f.add_subplot(num_rows, 2, start_sp + 1)
    u = np.mean(svm_results1['sample_full'], axis=1)
    se = np.std(svm_results1['sample_full'], axis=1)
    ax.plot(t, u, 'g')
    ax.fill_between(t, u - se, u + se, facecolor=(0, 0.5, 0))
    u = np.mean(svm_results2['sample_full'], axis=1)
    se = np.std(svm_results2['sample_full'], axis=1)
    ax.plot(t, u, 'm')
    ax.fill_between(t, u - se, u + se, facecolor=(0.5, 0, 0.5))
    if DMC:
        # BUG FIX: the title previously contained '\category' (an invalid
        # escape, rendered as a literal backslash); a line break was
        # presumably intended -- confirm against published figures.
        ax.set_title('Neuronal sample\ncategory decoding')
        cl = 1 / 2
    else:
        ax.set_title('Neuronal sample decoding')
        cl = 1 / 8
    na.add_subplot_fixings(ax, chance_level=cl)
    ax.set_ylabel('Decoding accuracy')
    # Right panel: synaptic decoding for the STP network only.
    ax = f.add_subplot(num_rows, 2, start_sp + 2)
    u = np.mean(svm_results2['sample_full_stp'], axis=1)
    se = np.std(svm_results2['sample_full_stp'], axis=1)
    ax.plot(t, u, 'm')
    ax.fill_between(t, u - se, u + se, facecolor=(0.5, 0, 0.5))
    if DMC:
        ax.set_title('Synaptic sample\ncategory decoding')
        cl = 1 / 2
    else:
        ax.set_title('Synaptic sample decoding')
        cl = 1 / 8
    na.add_subplot_fixings(ax, chance_level=cl)
def plot_summary_results(old_format = False):
    """Summarize sample decoding and behavioral performance across saved models.

    For each of N model groups and the STP tasks (DMS/DMC/DMrS), computes
    spiking and synaptic sample decoding via `calculate_svms` and behavioral
    performance via `contrib_to_behavior.Analysis.simulate_network`, then
    saves three figures: 'Example models.pdf' (one example model per task),
    'Average models.pdf' (mean over well-performing models) and
    'All models.pdf' (per-model heatmaps).

    Parameters
    ----------
    old_format : bool
        Forwarded to `neural_analysis` to read older saved-model files.

    Returns
    -------
    tuple
        (spike_decoding, synapse_decoding, spike_decoding_test,
         synapse_decoding_test, perf, perf_shuffled_hidden, perf_shuffled_stp)
    """
    dt = 20
    # Time axis in ms relative to sample onset; t0..t3 are tick indices.
    t = np.arange(0,2900,dt)
    t -= 900
    t0,t1,t2,t3 = np.where(t==-500), np.where(t==0),np.where(t==500),np.where(t==1500)
    num_svm_reps = 2
    # dead time + fixation + sample + delay + test, in time steps
    trial_length = (400+500+500+1000+500)//dt
    N = 11
    data_dir = 'D:/Masse/RNN STP/saved_models/'
    fn = ['DMS_', 'DMS_stp_', 'DMC_stp_', 'DMrS_stp_']
    titles = ['DMS no STP', 'DMS', 'DMC', 'DMrS']
    # Result arrays: (task, model group, time step, SVM repetition).
    spike_decoding = np.zeros((4, N, trial_length, num_svm_reps))
    synapse_decoding = np.zeros((4, N, trial_length, num_svm_reps))
    spike_decoding_test = np.zeros((4, N, trial_length, num_svm_reps))
    synapse_decoding_test = np.zeros((4, N, trial_length, num_svm_reps))
    perf = np.zeros((4, N))
    perf_shuffled_hidden = np.zeros((4, N))
    perf_shuffled_stp = np.zeros((4, N))
    """
    Calculate the spiking and synaptic sample decoding accuracy across all networks
    Calculate the behavioral performance
    """
    for i in range(N):
        print('Group ', i)
        # NOTE(review): range starts at 1, so task 0 ('DMS_', no STP) is
        # never analyzed here -- confirm this is intentional.
        for j in range(1,4):
            if j == 2:
                # DMC task decodes category membership.
                DMC = [True]
            else:
                DMC = [False]
            f = data_dir + fn[j] + str(i) + '.pkl'
            na = neural_analysis(f, ABBA=False, old_format = old_format)
            # NOTE(review): this perf value is overwritten by
            # simulate_network() a few lines below -- dead computation.
            perf[j,i] = get_perf(na.desired_outputs, na.model_outputs, na.train_mask, na.rule)
            spike_decode, synapse_decode, spike_decode_test, synapse_decode_test = na.calculate_svms(num_reps = num_svm_reps, DMC = DMC)
            spike_decoding[j,i,:,:] = spike_decode[:,0,:]
            synapse_decoding[j,i,:,:] = synapse_decode[:,0,:]
            spike_decoding_test[j,i,:,:] = spike_decode_test[:,0,:]
            synapse_decoding_test[j,i,:,:] = synapse_decode_test[:,0,:]
            a = contrib_to_behavior.Analysis(f,old_format = old_format)
            perf[j,i], perf_shuffled_hidden[j,i], perf_shuffled_stp[j,i] = a.simulate_network()
    """
    Calculate the mean decoding accuracy for the last 500 ms of the delay
    """
    d = range(1900//dt,2400//dt)
    # Mean over SVM reps and delay time steps -> (task, model group).
    delay_accuracy = np.mean(np.mean(spike_decoding[:,:,d,:],axis=3),axis=2)
    ind_example = [0]
    for j in range(1,4):
        # Among models above 90% performance, pick the one with the LOWEST
        # spiking delay decoding as the example.
        ind_good_perf = np.where(perf[j,:] > 0.9)[0]
        ind_sort = np.argsort(delay_accuracy[j,ind_good_perf])[0]
        ind_example.append(ind_good_perf[ind_sort])
    """
    Plot decoding accuracy from example models
    Only consider models with performance accuracy above 99.0%
    Will use the model with the lowest spike decoding value during the last 500 ms of the delay
    """
    # NOTE(review): the threshold actually used above is 0.9, not the 99.0%
    # stated in the block comment.
    print(ind_example)
    f = plt.figure(figsize=(6,4))
    for j in range(1,4):
        if j == 2:
            # Category decoding has 2 classes; identity decoding has 8.
            chance_level = 1/2
        else:
            chance_level = 1/8
        ax = f.add_subplot(2, 2, j+1)
        # Spiking decoding (green), mean +/- s.d. across SVM reps.
        u = np.mean(spike_decoding[j,ind_example[j],:,:],axis=1)
        se = np.std(spike_decoding[j,ind_example[j],:,:],axis=1)
        ax.plot(t,u,'g')
        ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
        # Synaptic decoding (magenta).
        u = np.mean(synapse_decoding[j,ind_example[j],:,:],axis=1)
        se = np.std(synapse_decoding[j,ind_example[j],:,:],axis=1)
        ax.plot(t,u,'m')
        ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
        # NOTE(review): `na` is whichever network was loaded LAST in the
        # loop above; add_subplot_fixings presumably only uses trial timing.
        na.add_subplot_fixings(ax, chance_level=chance_level)
        ax.set_title(titles[j])
        ax.set_ylabel('Decoding accuracy')
        ax.set_ylim([0, 1])
    plt.tight_layout()
    plt.savefig('Example models.pdf', format='pdf')
    plt.show()
    """
    Plot mean decoding accuracy across all models
    Only use models with performance accuracy above 85%
    """
    print(ind_example)
    f = plt.figure(figsize=(6,4))
    for j in range(1,4):
        if j == 2:
            chance_level = 1/2
        else:
            chance_level = 1/8
        ind_good_models = np.where(perf[j,:] > 0.85)[0]
        ax = f.add_subplot(2, 2, j+1)
        # Mean over reps, then mean/SEM over the selected models.
        u = np.mean(np.mean(spike_decoding[j,ind_good_models,:,:],axis=2),axis=0)
        se = np.std(np.mean(spike_decoding[j,ind_good_models,:,:],axis=2),axis=0)/np.sqrt(len(ind_good_models))
        ax.plot(t,u,'g')
        ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
        u = np.mean(np.mean(synapse_decoding[j,ind_good_models,:,:],axis=2),axis=0)
        se = np.std(np.mean(synapse_decoding[j,ind_good_models,:,:],axis=2),axis=0)/np.sqrt(len(ind_good_models))
        ax.plot(t,u,'m')
        ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
        na.add_subplot_fixings(ax, chance_level=chance_level)
        ax.set_title(titles[j])
        ax.set_ylabel('Decoding accuracy')
        ax.set_ylim([0, 1])
    plt.tight_layout()
    plt.savefig('Average models.pdf', format='pdf')
    plt.show()
    """
    Plot decoding accuracy across all models using heatmaps
    Only use models with performance accuracy above 97.5%
    """
    print(ind_example)
    f = plt.figure(figsize=(6,4))
    for j in range(1,4):
        if j == 2:
            chance_level = 1/2
        else:
            chance_level = 1/8
        ind_good_models = np.where(perf[j,:] > 0.975)[0]
        ax = f.add_subplot(2, 2, j+1)
        # One row per model: synaptic decoding averaged over SVM reps.
        u = np.mean(synapse_decoding[j,ind_good_models,:,:],axis=2)
        # NOTE(review): interpolation=None selects the rcParams default;
        # the string 'none' is the value that disables interpolation.
        im = ax.imshow(u,aspect='auto',interpolation=None)
        f.colorbar(im,orientation='vertical')
        ax.spines['right'].set_visible(False)
        ax.set_ylabel('Model number')
        ax.set_xlabel('Time relative to sample onset (ms)')
        ax.spines['top'].set_visible(False)
        ax.set_title(titles[j])
        ax.set_xticks([t0[0], t1[0], t2[0], t3[0]])
        ax.set_xticklabels([-500,0,500,1500])
    plt.tight_layout()
    plt.savefig('All models.pdf', format='pdf')
    plt.show()
    print(ind_example)
    return spike_decoding, synapse_decoding, spike_decoding_test, synapse_decoding_test, perf, perf_shuffled_hidden, perf_shuffled_stp
def plot_variable_delay_results():
    """Plot spiking and synaptic sample decoding for one variable-delay model.

    Loads a single DMS network trained with a variable delay, prints its
    behavioral accuracy, computes sample decoding, and saves the decoding
    traces (green = spiking, magenta = synaptic) to 'Var delay model.pdf'.
    """
    data_dir = 'C:/Users/Freedmanlab/Documents/Masse/STP/saved_models/'
    dt = 25
    num_svm_reps = 5
    # Time axis in ms relative to sample onset.
    t = np.arange(0, 2900, dt)
    t -= 900
    fn = 'DMS_EI_std_stf_var_delay_1_iter1000.pkl'
    f = data_dir + fn
    na = neural_analysis(f, ABBA=False)
    # BUG FIX: get_perf requires the `rule` argument; it was previously
    # omitted, which raised a TypeError.
    perf = get_perf(na.desired_outputs, na.model_outputs, na.train_mask, na.rule)
    print('Model accuracy = ', perf)
    # NOTE(review): elsewhere in this file calculate_svms returns 4 values
    # and takes DMC as a list -- confirm this 2-value / scalar-DMC call
    # matches the current signature.
    spike_decode, synapse_decode = na.calculate_svms(num_reps = num_svm_reps, DMC = False)
    f = plt.figure(figsize=(3, 2))
    chance_level = 1/8
    ax = f.add_subplot(1, 1, 1)
    # Spiking decoding (green), mean +/- s.d. across SVM reps, rule 0.
    u = np.mean(spike_decode[:, 0, :], axis=1)
    se = np.std(spike_decode[:, 0, :], axis=1)
    ax.plot(t, u, 'g')
    ax.fill_between(t, u - se, u + se, facecolor=(0, 1, 0, 0.5))
    # Synaptic decoding (magenta).
    u = np.mean(synapse_decode[:, 0, :], axis=1)
    se = np.std(synapse_decode[:, 0, :], axis=1)
    ax.plot(t, u, 'm')
    ax.fill_between(t, u - se, u + se, facecolor=(1, 0, 1, 0.5))
    na.add_subplot_fixings(ax, chance_level=chance_level)
    ax.set_ylabel('Decoding accuracy')
    ax.set_ylim([0, 1])
    plt.tight_layout()
    plt.savefig('Var delay model.pdf', format='pdf')
    plt.show()
def plot_multiple_delay_results():
    """Plot end-of-delay spike decoding for DMS models trained with several delays.

    For each of N model groups and each delay length (1000/1500/2000 ms),
    loads the saved network (falling back to the old file format on failure),
    computes sample decoding, and summarizes the mean spike decoding over the
    last 100 ms of the delay. Models above 90% behavioral accuracy are
    scattered against their delay length, with the chance level dashed.

    Returns
    -------
    tuple of np.ndarray
        (mean_decoding, std_decoding, perf), each shaped (num_delays, N).
    """
    dt = 20
    num_svm_reps = 5
    N = 8
    data_dir = 'D:/Masse/RNN STP/saved_models/'
    delay = [1000, 1500, 2000]
    num_delays = len(delay)
    mean_decoding = np.zeros((num_delays, N))
    std_decoding = np.zeros((num_delays, N))
    perf = np.zeros((num_delays, N))
    for i in range(N):
        print('Group ', i)
        for j in range(num_delays):
            # The 1000 ms (default-delay) models use the base filename convention.
            if j == 0:
                f = data_dir + 'DMS_stp_' + str(i) + '.pkl'
            else:
                f = data_dir + 'DMS_stp_delay_' + str(delay[j]) + '_' + str(i) + '.pkl'
            # BUG FIX: narrowed the bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) to `except Exception:`.
            try:
                na = neural_analysis(f, ABBA=False, old_format = False)
                spike_decode, synapse_decode, _, _ = na.calculate_svms(num_reps = num_svm_reps, DMC = [False])
                perf[j,i] = get_perf(na.desired_outputs, na.model_outputs, na.train_mask, na.rule)
            except Exception:
                # Retry assuming the older saved-model format.
                na = neural_analysis(f, ABBA=False, old_format = True)
                spike_decode, synapse_decode, _, _ = na.calculate_svms(num_reps = num_svm_reps, DMC = [False])
                perf[j,i] = get_perf(na.desired_outputs, na.model_outputs, na.train_mask, na.rule)
            # look at last 100 ms of delay epoch
            # variable delay
            delay_end = (400+500+500+delay[j])//dt
            delay_start = (400+500+500+delay[j]-100)//dt
            # variable tau
            #delay_end = (400+500+500+1000)//dt
            #delay_start = (400+500+500+900)//dt
            mean_decoding[j,i] = np.mean(spike_decode[delay_start:delay_end,0,:])
            std_decoding[j,i] = np.std(np.mean(spike_decode[delay_start:delay_end,0,:],axis=0))
            print(i, j, perf[j,i], mean_decoding[j,i], std_decoding[j,i])
    f = plt.figure(figsize=(3, 2))
    chance_level = 1/8
    ax = f.add_subplot(1, 1, 1)
    for i, d in enumerate(delay):
        # only use models with over 90% accuracy
        ind_good_model = np.where(perf[i,:] > 0.90)[0]
        ax.plot([d]*len(ind_good_model), mean_decoding[i,ind_good_model], 'k.')
    # Dashed chance-level reference line.
    ax.plot([0, 3000], [chance_level, chance_level], 'k--')
    ax.set_ylim([0, 1])
    ax.set_xlim([400, 2100])
    ax.set_xticks(delay)
    ax.set_xticklabels(delay)
    return mean_decoding, std_decoding, perf
def plot_summary_results_v2(old_format = False):
    """Summarize decoding and shuffle-cost results for DMS/DMC/DMrS/DMS+DMrS models.

    For each of N model groups and each task, computes spiking/synaptic
    sample decoding and the behavioral cost of shuffling spiking activity or
    STP state (via `contrib_to_behavior.Analysis`). The dual-task DMS_DMrS
    model contributes two result rows (one per rule), hence all arrays hold
    num_tasks+1 rows. Saves 'Summary1.pdf' (example-model decoding traces)
    and 'Summary2.pdf' (shuffle cost vs. delay decoding scatter plots).

    Parameters
    ----------
    old_format : bool
        Preferred saved-model format; the other format is tried on failure.

    Returns
    -------
    tuple
        (spike_decoding, synapse_decoding, spike_decoding_test,
         synapse_decoding_test, perf, perf_shuffled_hidden,
         perf_shuffled_stp, ind_example)
    """
    dt = 20
    # Time axis in ms relative to sample onset; t0..t3 are tick indices.
    t = np.arange(0,2900,dt)
    t -= 900
    t0,t1,t2,t3 = np.where(t==-500), np.where(t==0),np.where(t==500),np.where(t==1500)
    num_svm_reps = 2
    # dead time + fixation + sample + delay + test, in time steps
    trial_length = (400+500+500+1000+500)//dt
    N = 20
    data_dir = 'D:/Masse/RNN STP/saved_models/'
    fn = ['DMS_stp_', 'DMC_stp_', 'DMrS_stp_', 'DMS_DMrS_stp_']
    titles = ['DMS', 'DMC', 'DMrS', 'DMS_DMrS']
    num_tasks = len(fn)
    """
    the DMS_DMrS will produce two decoding/accuracy scores, one for each task
    thus, will show num_tasks+1 set of values
    """
    spike_decoding = np.zeros((num_tasks+1, N, trial_length, num_svm_reps))
    synapse_decoding = np.zeros((num_tasks+1, N, trial_length, num_svm_reps))
    spike_decoding_test = np.zeros((num_tasks+1, N, trial_length, num_svm_reps))
    synapse_decoding_test = np.zeros((num_tasks+1, N, trial_length, num_svm_reps))
    perf = np.zeros((num_tasks+1, N))
    perf_shuffled_hidden = np.zeros((num_tasks+1, N))
    perf_shuffled_stp = np.zeros((num_tasks+1, N))
    """
    Calculate the spiking and synaptic sample decoding accuracy across all networks
    Calculate the behavioral performance
    """
    for i in range(N):
        print('Group ', i)
        for j in range(num_tasks):
            # DMC decodes category; the dual-rule task decodes sample
            # identity separately for both rules.
            if fn[j] == 'DMC_stp_':
                DMC = [True]
            elif fn[j] == 'DMS_DMrS_stp_':
                DMC = [False, False]
            else:
                DMC = [False]
            f = data_dir + fn[j] + str(i) + '.pkl'
            # Fall back to the other saved-model format if loading fails.
            # NOTE(review): bare `except:` also catches KeyboardInterrupt /
            # SystemExit; prefer `except Exception:`.
            try:
                na = neural_analysis(f, ABBA=False, old_format = old_format)
            except:
                na = neural_analysis(f, ABBA=False, old_format = not old_format)
            #perf_temp = get_perf(na.desired_outputs, na.model_outputs, na.train_mask, na.rule)
            spike_decode, synapse_decode, spike_decode_test, synapse_decode_test = na.calculate_svms(num_reps = num_svm_reps, DMC = DMC)
            try:
                a = contrib_to_behavior.Analysis(f,old_format = old_format)
                perf_temp, perf_shuffled_hidden_temp, perf_shuffled_stp_temp = a.simulate_network()
            except:
                a = contrib_to_behavior.Analysis(f,old_format = not old_format)
                perf_temp, perf_shuffled_hidden_temp, perf_shuffled_stp_temp = a.simulate_network()
            if j<3:
                # Single-task models: scalar results go into row j, rule 0.
                print(perf_temp)
                perf[j,i] = perf_temp
                perf_shuffled_hidden[j,i] = perf_shuffled_hidden_temp
                perf_shuffled_stp[j,i] = perf_shuffled_stp_temp
                spike_decoding[j,i,:,:] = spike_decode[:,0,:]
                synapse_decoding[j,i,:,:] = synapse_decode[:,0,:]
                spike_decoding_test[j,i,:,:] = spike_decode_test[:,0,:]
                synapse_decoding_test[j,i,:,:] = synapse_decode_test[:,0,:]
            else:
                # Dual-task model: fills the last two rows (j and j+1),
                # one per rule; transpose moves the rule axis to the front.
                perf[j:,i] = perf_temp
                perf_shuffled_hidden[j:,i] = perf_shuffled_hidden_temp
                perf_shuffled_stp[j:,i] = perf_shuffled_stp_temp
                spike_decoding[j:,i,:,:] = np.transpose(spike_decode[:,:,:],(1,0,2))
                synapse_decoding[j:,i,:,:] = np.transpose(synapse_decode[:,:,:],(1,0,2))
                spike_decoding_test[j:,i,:,:] = np.transpose(spike_decode_test[:,:,:],(1,0,2))
                synapse_decoding_test[j:,i,:,:] = np.transpose(synapse_decode_test[:,:,:],(1,0,2))
    print(spike_decoding.shape)
    """
    Calculate the mean decoding accuracy for the last 500 ms of the delay
    """
    dt=20
    d = range(1900//dt,2400//dt)
    # Mean over SVM reps and delay time steps -> (task row, model group).
    delay_accuracy = np.mean(np.mean(spike_decoding[:,:,d,:],axis=3),axis=2)
    fn = ['DMS_stp_', 'DMC_stp_', 'DMrS_stp_', 'DMS_DMrS_stp_']
    titles = ['DMS', 'DMC', 'DMrS', 'DMS + DMrS']
    # combine the DMS and DMrS trials for the DMS_DMrS task
    delay_accuracy[3,:] = np.mean(delay_accuracy[3:,:],axis=0)
    # NOTE(review): `perf[:num_tasks,:]` is a VIEW of perf, so the next
    # assignment ALSO overwrites perf[num_tasks-1,:] -- confirm the
    # aliasing is intended before reusing perf below.
    perf_combined = perf[:num_tasks,:]
    perf_combined[num_tasks-1,:] = np.mean(perf[num_tasks:,:],axis=0)
    # will find 2 examples for each task
    # NOTE(review): only columns 0 and 1 are ever filled; int8 is safe only
    # while N <= 127.
    ind_example = np.zeros((num_tasks, 3),dtype=np.int8)
    for j in range(num_tasks):
        # Among models above 90% accuracy, take the median and the lowest
        # delay-decoding models as the two examples.
        ind_good_perf = np.where(perf_combined[j,:] > 0.9)[0]
        ind_sort = np.argsort(delay_accuracy[j,ind_good_perf])
        #ind_example[j,0] = ind_good_perf[ind_sort][-1]
        ind_example[j,0]= ind_good_perf[ind_sort][len(ind_sort)//2]
        ind_example[j,1]= ind_good_perf[ind_sort][0]
    f = plt.figure(figsize=(6,8.5))
    for j in range(num_tasks):
        if fn[j] == 'DMC_stp_':
            # Category decoding has 2 classes; identity decoding has 8.
            chance_level = 1/2
        else:
            chance_level = 1/8
        for i in range(2):
            ax = f.add_subplot(num_tasks+1, 2, j*2+i+1)
            # Spiking decoding (green), mean +/- s.d. across SVM reps.
            u = np.mean(spike_decoding[j,ind_example[j,i],:,:],axis=1)
            se = np.std(spike_decoding[j,ind_example[j,i],:,:],axis=1)
            ax.plot(t,u,'g')
            ax.fill_between(t,u-se,u+se,facecolor=(0,1,0,0.5))
            # Synaptic decoding (magenta).
            u = np.mean(synapse_decoding[j,ind_example[j,i],:,:],axis=1)
            se = np.std(synapse_decoding[j,ind_example[j,i],:,:],axis=1)
            ax.plot(t,u,'m')
            ax.fill_between(t,u-se,u+se,facecolor=(1,0,1,0.5))
            # NOTE(review): `na` is whichever network was loaded last above.
            na.add_subplot_fixings(ax, chance_level=chance_level)
            if j == 3:
                # DMS_DMrS task
                # Overlay the second rule's decoding (blue = spiking,
                # red = synaptic).
                u = np.mean(spike_decoding[j+1,ind_example[j,i],:,:],axis=1)
                se = np.std(spike_decoding[j+1,ind_example[j,i],:,:],axis=1)
                ax.plot(t,u,'b')
                ax.fill_between(t,u-se,u+se,facecolor=(0,0,1,0.5))
                u = np.mean(synapse_decoding[j+1,ind_example[j,i],:,:],axis=1)
                se = np.std(synapse_decoding[j+1,ind_example[j,i],:,:],axis=1)
                ax.plot(t,u,'r')
                ax.fill_between(t,u-se,u+se,facecolor=(1,0,0,0.5))
            ax.set_xticks([-500,0,500,1000,1500])
            # Dashed marker at end of delay.
            ax.plot([1000,1000],[-2, 99],'k--')
            ax.set_yticks([0,0.5,1])
            ax.set_title(titles[j])
            ax.set_ylabel('Decoding accuracy')
            ax.set_ylim([0, 1])
    plt.tight_layout()
    plt.savefig('Summary1.pdf', format='pdf')
    plt.show()
    # Marker color/shape per task row for the scatter plots below.
    col=['b','r','g','c','k']
    marker = ['o','v','^','s','D']
    """
    Normalize delay decoding
    """
    # Rescale so chance maps to 0 and perfect decoding to 1
    # (row 1 = DMC has chance 1/2; all other rows 1/8).
    for j in range(num_tasks+1):
        if j == 1:
            delay_accuracy[j,:] = (delay_accuracy[j,:]-0.5)*2
        else:
            delay_accuracy[j,:] = (delay_accuracy[j,:]-1/8)*8/7
    f = plt.figure(figsize=(6.5,3))
    # Panel 1: shuffled-spiking cost vs. normalized delay decoding.
    ax = f.add_subplot(1, 3, 1)
    for j in range(num_tasks+1):
        ind_good_models = np.where(perf[j,:] > 0.9)[0]
        #ax.plot(delay_accuracy[j,ind_good_models], perf_shuffled_hidden[j,ind_good_models]
        #    -perf[j,ind_good_models],marker[j], color=col[j], markersize=3)
        ax.plot(delay_accuracy[j,ind_good_models], perf_shuffled_hidden[j,ind_good_models]
            -perf[j,ind_good_models],marker[j], color=col[j], markersize=3)
    ax.set_xlim(-0.1,1.02)
    ax.set_aspect(1.12/0.5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([-0.5,-0.25,0])
    ax.set_xticks([0,0.5,1])
    ax.set_ylabel('Delta acc. shuffled spike rate')
    ax.set_xlabel('Normalized delay decoding acc.')
    # Panel 2: shuffled-STP cost vs. normalized delay decoding.
    ax = f.add_subplot(1, 3, 2)
    for j in range(num_tasks+1):
        ind_good_models = np.where(perf[j,:] > 0.9)[0]
        #ax.plot(delay_accuracy[j,ind_good_models], perf_shuffled_hidden[j,ind_good_models]
        #    -perf[j,ind_good_models],marker[j], color=col[j], markersize=3)
        ax.plot(delay_accuracy[j,ind_good_models], perf_shuffled_stp[j,ind_good_models]
            -perf[j,ind_good_models],marker[j], color=col[j], markersize=3)
    ax.set_xlim(-0.1,1.02)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([-0.5,-0.25,0])
    ax.set_xticks([0,0.5,1])
    ax.set_aspect(1.12/0.5)
    ax.set_ylabel('Delta acc. shuffled STP')
    ax.set_xlabel('Normalized delay decoding acc.')
    # Panel 3: shuffled-spiking cost vs. shuffled-STP cost.
    ax = f.add_subplot(1, 3, 3)
    for j in range(num_tasks+1):
        ind_good_models = np.where(perf[j,:] > 0.9)[0]
        ax.plot(perf_shuffled_stp[j,ind_good_models]-perf[j,ind_good_models], perf_shuffled_hidden[j,ind_good_models]
            -perf[j,ind_good_models],marker[j], color=col[j], markersize=3)
    ax.set_aspect(1)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([-0.5,-0.25,0])
    ax.set_xticks([-0.5,-0.25,0])
    ax.set_ylabel('Delta acc. shuffled spike rate')
    ax.set_xlabel('Delta acc. shuffled STP')
    plt.tight_layout()
    plt.savefig('Summary2.pdf', format='pdf')
    plt.show()
    return spike_decoding, synapse_decoding, spike_decoding_test, synapse_decoding_test, perf, perf_shuffled_hidden, perf_shuffled_stp, ind_example
def get_perf(y, y_hat, mask, rule):
    """Return behavioral accuracy over time points requiring a test response.

    Accuracy is the mask-weighted fraction of time points at which the
    argmax of the network output matches the argmax of the target.

    Parameters
    ----------
    y : np.ndarray
        Desired outputs, shape (n_outputs, time, trials); output units 1 and
        2 are treated as the response units -- presumably unit 0 is fixation
        (TODO confirm against the model definition).
    y_hat : np.ndarray
        Network outputs, same shape as `y`.
    mask : np.ndarray
        Training mask weighting each (time, trial) point; may carry a
        singleton axis (it is squeezed before use).
    rule
        Unused; kept for interface compatibility with existing callers.

    Returns
    -------
    float
        Fraction of mask-weighted time points classified correctly.
    """
    print('Neural analysis: get_perf')
    print(y.shape, y_hat.shape, mask.shape)
    # Only examine time points when the test stimulus is on, i.e. when a
    # response on output unit 1 or 2 is required.
    # BUG FIX: use an out-of-place product so the caller's mask array is not
    # silently modified (the original `mask *= ...` mutated it in place).
    mask = mask * np.logical_or(y[1, :, :] > 0, y[2, :, :] > 0)
    #mask *= y[0,:,:]==0
    y = np.argmax(y, axis=0)
    y_hat = np.argmax(y_hat, axis=0)
    return np.sum(np.float32(y == y_hat) * np.squeeze(mask)) / np.sum(mask)
| 44.869522
| 193
| 0.577603
| 13,872
| 91,130
| 3.587587
| 0.043829
| 0.015774
| 0.011534
| 0.016597
| 0.815681
| 0.766954
| 0.729459
| 0.691804
| 0.65232
| 0.62232
| 0
| 0.047017
| 0.267387
| 91,130
| 2,031
| 194
| 44.869522
| 0.698411
| 0.044958
| 0
| 0.523964
| 0
| 0
| 0.071578
| 0.009564
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020078
| false
| 0.001295
| 0.004534
| 0
| 0.037565
| 0.012306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d27aa9658b089ff498be1062a18ab6190ca75b26
| 179
|
py
|
Python
|
geochannel/ws/utils.py
|
kosior/geochannel
|
4d8c0bc191738155fdc4dc4803e95eec6456f2d1
|
[
"MIT"
] | null | null | null |
geochannel/ws/utils.py
|
kosior/geochannel
|
4d8c0bc191738155fdc4dc4803e95eec6456f2d1
|
[
"MIT"
] | 1
|
2019-07-21T20:07:19.000Z
|
2019-07-21T20:13:11.000Z
|
geochannel/ws/utils.py
|
kosior/geochannel
|
4d8c0bc191738155fdc4dc4803e95eec6456f2d1
|
[
"MIT"
] | null | null | null |
from urllib.parse import parse_qs
def get_token_from_query_string(query_string, name='access_token'):
    """Extract the first value of *name* from a URL query string.

    Returns None when the parameter is absent (parse_qs drops blank values,
    so an empty `access_token=` also yields None).
    """
    values = parse_qs(query_string).get(name)
    return values[0] if values else None
| 25.571429
| 67
| 0.75419
| 27
| 179
| 4.62963
| 0.518519
| 0.264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134078
| 179
| 6
| 68
| 29.833333
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0.067039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
962b21b6895e891a00376fd922fb52fcb87572c9
| 101
|
py
|
Python
|
project/bin/__init__.py
|
mizxc/kispower
|
38d88c4c5a983a90009cb8c7012cb4295b1aec06
|
[
"MIT"
] | 12
|
2020-03-12T08:13:52.000Z
|
2022-01-19T05:27:35.000Z
|
project/model/__init__.py
|
kqqian/kispower
|
38d88c4c5a983a90009cb8c7012cb4295b1aec06
|
[
"MIT"
] | 4
|
2020-07-18T05:07:52.000Z
|
2022-01-13T02:21:58.000Z
|
project/model/__init__.py
|
kqqian/kispower
|
38d88c4c5a983a90009cb8c7012cb4295b1aec06
|
[
"MIT"
] | 3
|
2020-04-30T02:49:25.000Z
|
2022-01-19T05:27:38.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019-12-21
# @Author : mizxc
# @Email : xiangxianjiao@163.com
| 25.25
| 34
| 0.564356
| 13
| 101
| 4.384615
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151899
| 0.217822
| 101
| 4
| 34
| 25.25
| 0.56962
| 0.920792
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
96407107e2c4ddf8023b1af41a195e607e732329
| 163
|
py
|
Python
|
omim_data_pipeline/__main__.py
|
joeflack4/omim-data-pipeline
|
1d0eb9fcb12303b8104bf1dcdf3d01a0fc174c18
|
[
"MIT"
] | null | null | null |
omim_data_pipeline/__main__.py
|
joeflack4/omim-data-pipeline
|
1d0eb9fcb12303b8104bf1dcdf3d01a0fc174c18
|
[
"MIT"
] | null | null | null |
omim_data_pipeline/__main__.py
|
joeflack4/omim-data-pipeline
|
1d0eb9fcb12303b8104bf1dcdf3d01a0fc174c18
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Package entry point."""
from omim_data_pipeline.interfaces.cli import cli
if __name__ == '__main__':
cli()
| 18.111111
| 49
| 0.662577
| 22
| 163
| 4.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.153374
| 163
| 8
| 50
| 20.375
| 0.695652
| 0.392638
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
964fa3a7cd563bf211da61450f3afc731a1f9af3
| 427
|
py
|
Python
|
distiller/helper/__init__.py
|
arkel23/IntermediateFeaturesAugmentedRepDistiller
|
86e332d6100246bc9e6c6ee7492f3a7a70acfdcc
|
[
"BSD-2-Clause"
] | null | null | null |
distiller/helper/__init__.py
|
arkel23/IntermediateFeaturesAugmentedRepDistiller
|
86e332d6100246bc9e6c6ee7492f3a7a70acfdcc
|
[
"BSD-2-Clause"
] | null | null | null |
distiller/helper/__init__.py
|
arkel23/IntermediateFeaturesAugmentedRepDistiller
|
86e332d6100246bc9e6c6ee7492f3a7a70acfdcc
|
[
"BSD-2-Clause"
] | 1
|
2021-09-25T08:46:47.000Z
|
2021-09-25T08:46:47.000Z
|
from .parser import parse_option_teacher, parse_option_linear, parse_option_student
from .misc_utils import count_params_single, count_params_module_list, summary_stats
from .model_utils import load_model, load_teacher, save_model
from .optim_utils import return_optimizer_scheduler
from .dist_utils import distribute_bn
from .pretrain import init
from .loops import train_vanilla, train_distill, validate, feature_extraction
| 47.444444
| 84
| 0.873536
| 62
| 427
| 5.612903
| 0.580645
| 0.126437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088993
| 427
| 8
| 85
| 53.375
| 0.894602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
96762c0a0828832425abf90026bc2273b89c1eaa
| 141
|
py
|
Python
|
ulm/__init__.py
|
dupuy/ulm
|
db1563394975bb1a60fdbb958520d9799d4ec71c
|
[
"BSD-3-Clause"
] | 1
|
2015-05-05T11:13:11.000Z
|
2015-05-05T11:13:11.000Z
|
ulm/__init__.py
|
dupuy/ulm
|
db1563394975bb1a60fdbb958520d9799d4ec71c
|
[
"BSD-3-Clause"
] | null | null | null |
ulm/__init__.py
|
dupuy/ulm
|
db1563394975bb1a60fdbb958520d9799d4ec71c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Ubuntu Laptop Monitoring - Django project to display laptop hardware status.
.. moduleauthor:: Alexander Dupuy <alex.dupuy@mac.com>
"""
| 23.5
| 79
| 0.744681
| 17
| 141
| 6.176471
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134752
| 141
| 5
| 80
| 28.2
| 0.860656
| 0.93617
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
967b09bfc6a5731862ffc8d02fec681a40529898
| 37,172
|
py
|
Python
|
Optuna/OB5F_LS/SEQ.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 38
|
2021-09-18T15:33:28.000Z
|
2022-02-21T17:29:08.000Z
|
Optuna/OB5F_LS/SEQ.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 4
|
2022-01-02T14:46:12.000Z
|
2022-02-16T18:39:41.000Z
|
Optuna/OB5F_LS/SEQ.py
|
ysdede/jesse_strategies
|
ade9f4ba42cec11207c766d267b9d8feb8bce648
|
[
"CC0-1.0"
] | 11
|
2021-10-19T06:21:43.000Z
|
2022-02-21T17:29:10.000Z
|
hps = {
"0351291110650853": {
"ott_len": 35,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 65,
"max_risk_long": 85
},
"0561821341040643": {
"ott_len": 56,
"ott_percent": 182,
"ott_bw_up": 134,
"tps_qty_index": 104,
"max_risk_long": 64
},
"0351291110200403": {
"ott_len": 35,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 20,
"max_risk_long": 40
},
"0572181720720673": {
"ott_len": 57,
"ott_percent": 218,
"ott_bw_up": 172,
"tps_qty_index": 72,
"max_risk_long": 67
},
"0331701430640973": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 143,
"tps_qty_index": 64,
"max_risk_long": 97
},
"66787357652": {
"ott_len": 66,
"ott_percent": 78,
"ott_bw_up": 73,
"tps_qty_index": 57,
"max_risk_long": 65
},
"0701701490000913": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 0,
"max_risk_long": 91
},
"0700781390290673": {
"ott_len": 70,
"ott_percent": 78,
"ott_bw_up": 139,
"tps_qty_index": 29,
"max_risk_long": 67
},
"0601050950650813": {
"ott_len": 60,
"ott_percent": 105,
"ott_bw_up": 95,
"tps_qty_index": 65,
"max_risk_long": 81
},
"0400781490420563": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 42,
"max_risk_long": 56
},
"0572181750000913": {
"ott_len": 57,
"ott_percent": 218,
"ott_bw_up": 175,
"tps_qty_index": 0,
"max_risk_long": 91
},
"0281701690020483": {
"ott_len": 28,
"ott_percent": 170,
"ott_bw_up": 169,
"tps_qty_index": 2,
"max_risk_long": 48
},
"0631701340690723": {
"ott_len": 63,
"ott_percent": 170,
"ott_bw_up": 134,
"tps_qty_index": 69,
"max_risk_long": 72
},
"0691581140650703": {
"ott_len": 69,
"ott_percent": 158,
"ott_bw_up": 114,
"tps_qty_index": 65,
"max_risk_long": 70
},
"0450781491120653": {
"ott_len": 45,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 112,
"max_risk_long": 65
},
"0691050980650663": {
"ott_len": 69,
"ott_percent": 105,
"ott_bw_up": 98,
"tps_qty_index": 65,
"max_risk_long": 66
},
"0391701430640973": {
"ott_len": 39,
"ott_percent": 170,
"ott_bw_up": 143,
"tps_qty_index": 64,
"max_risk_long": 97
},
"0331291110650813": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 65,
"max_risk_long": 81
},
"0351291110200453": {
"ott_len": 35,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 20,
"max_risk_long": 45
},
"0701701380640523": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 64,
"max_risk_long": 52
},
"0450781490370653": {
"ott_len": 45,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0521051340690693": {
"ott_len": 52,
"ott_percent": 105,
"ott_bw_up": 134,
"tps_qty_index": 69,
"max_risk_long": 69
},
"66788842652": {
"ott_len": 66,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 42,
"max_risk_long": 65
},
"0640781390420673": {
"ott_len": 64,
"ott_percent": 78,
"ott_bw_up": 139,
"tps_qty_index": 42,
"max_risk_long": 67
},
"0700701120370653": {
"ott_len": 70,
"ott_percent": 70,
"ott_bw_up": 112,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0682181720800673": {
"ott_len": 68,
"ott_percent": 218,
"ott_bw_up": 172,
"tps_qty_index": 80,
"max_risk_long": 67
},
"0311051471100663": {
"ott_len": 31,
"ott_percent": 105,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 66
},
"0701291110370653": {
"ott_len": 70,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0400781360170653": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 136,
"tps_qty_index": 17,
"max_risk_long": 65
},
"0682181721120663": {
"ott_len": 68,
"ott_percent": 218,
"ott_bw_up": 172,
"tps_qty_index": 112,
"max_risk_long": 66
},
"0331291490650523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 65,
"max_risk_long": 52
},
"0334791750000813": {
"ott_len": 33,
"ott_percent": 479,
"ott_bw_up": 175,
"tps_qty_index": 0,
"max_risk_long": 81
},
"57788850852": {
"ott_len": 57,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 50,
"max_risk_long": 85
},
"0581050990500523": {
"ott_len": 58,
"ott_percent": 105,
"ott_bw_up": 99,
"tps_qty_index": 50,
"max_risk_long": 52
},
"0531701470690723": {
"ott_len": 53,
"ott_percent": 170,
"ott_bw_up": 147,
"tps_qty_index": 69,
"max_risk_long": 72
},
"0541701430841003": {
"ott_len": 54,
"ott_percent": 170,
"ott_bw_up": 143,
"tps_qty_index": 84,
"max_risk_long": 100
},
"57788842712": {
"ott_len": 57,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 42,
"max_risk_long": 71
},
"70709372652": {
"ott_len": 70,
"ott_percent": 70,
"ott_bw_up": 93,
"tps_qty_index": 72,
"max_risk_long": 65
},
"0540971110650593": {
"ott_len": 54,
"ott_percent": 97,
"ott_bw_up": 111,
"tps_qty_index": 65,
"max_risk_long": 59
},
"0331291110610813": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 61,
"max_risk_long": 81
},
"0311031470410553": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 41,
"max_risk_long": 55
},
"0292181721120663": {
"ott_len": 29,
"ott_percent": 218,
"ott_bw_up": 172,
"tps_qty_index": 112,
"max_risk_long": 66
},
"0400781470420563": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 42,
"max_risk_long": 56
},
"0541291400650653": {
"ott_len": 54,
"ott_percent": 129,
"ott_bw_up": 140,
"tps_qty_index": 65,
"max_risk_long": 65
},
"0312181721100913": {
"ott_len": 31,
"ott_percent": 218,
"ott_bw_up": 172,
"tps_qty_index": 110,
"max_risk_long": 91
},
"0330781470590493": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 59,
"max_risk_long": 49
},
"0400781490170563": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 17,
"max_risk_long": 56
},
"40789942562": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 99,
"tps_qty_index": 42,
"max_risk_long": 56
},
"0701291160370653": {
"ott_len": 70,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0331291170220593": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 117,
"tps_qty_index": 22,
"max_risk_long": 59
},
"0310781471100493": {
"ott_len": 31,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 49
},
"57788872652": {
"ott_len": 57,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 72,
"max_risk_long": 65
},
"0660781240650673": {
"ott_len": 66,
"ott_percent": 78,
"ott_bw_up": 124,
"tps_qty_index": 65,
"max_risk_long": 67
},
"0400781360480653": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 136,
"tps_qty_index": 48,
"max_risk_long": 65
},
"70788864712": {
"ott_len": 70,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 64,
"max_risk_long": 71
},
"0350781380090653": {
"ott_len": 35,
"ott_percent": 78,
"ott_bw_up": 138,
"tps_qty_index": 9,
"max_risk_long": 65
},
"0331291110410523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 41,
"max_risk_long": 52
},
"0331701340470523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 134,
"tps_qty_index": 47,
"max_risk_long": 52
},
"0311031470420523": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 42,
"max_risk_long": 52
},
"0331051110270913": {
"ott_len": 33,
"ott_percent": 105,
"ott_bw_up": 111,
"tps_qty_index": 27,
"max_risk_long": 91
},
"0682181731120663": {
"ott_len": 68,
"ott_percent": 218,
"ott_bw_up": 173,
"tps_qty_index": 112,
"max_risk_long": 66
},
"0701701340690523": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 134,
"tps_qty_index": 69,
"max_risk_long": 52
},
"70788872612": {
"ott_len": 70,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 72,
"max_risk_long": 61
},
"0312181721100663": {
"ott_len": 31,
"ott_percent": 218,
"ott_bw_up": 172,
"tps_qty_index": 110,
"max_risk_long": 66
},
"0331291490590523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 59,
"max_risk_long": 52
},
"0682181571120663": {
"ott_len": 68,
"ott_percent": 218,
"ott_bw_up": 157,
"tps_qty_index": 112,
"max_risk_long": 66
},
"0550781430310673": {
"ott_len": 55,
"ott_percent": 78,
"ott_bw_up": 143,
"tps_qty_index": 31,
"max_risk_long": 67
},
"48788842652": {
"ott_len": 48,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 42,
"max_risk_long": 65
},
"0581050970270653": {
"ott_len": 58,
"ott_percent": 105,
"ott_bw_up": 97,
"tps_qty_index": 27,
"max_risk_long": 65
},
"0572181390030713": {
"ott_len": 57,
"ott_percent": 218,
"ott_bw_up": 139,
"tps_qty_index": 3,
"max_risk_long": 71
},
"0701701341090523": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 134,
"tps_qty_index": 109,
"max_risk_long": 52
},
"0331291110610493": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 61,
"max_risk_long": 49
},
"0420781430420553": {
"ott_len": 42,
"ott_percent": 78,
"ott_bw_up": 143,
"tps_qty_index": 42,
"max_risk_long": 55
},
"0631701380480523": {
"ott_len": 63,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 48,
"max_risk_long": 52
},
"0311291490650523": {
"ott_len": 31,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 65,
"max_risk_long": 52
},
"0330781490890673": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 89,
"max_risk_long": 67
},
"0311031470030523": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 3,
"max_risk_long": 52
},
"0331291490410793": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 41,
"max_risk_long": 79
},
"0631701471100913": {
"ott_len": 63,
"ott_percent": 170,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 91
},
"0321031470840913": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 84,
"max_risk_long": 91
},
"0311291160930823": {
"ott_len": 31,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 93,
"max_risk_long": 82
},
"0350781380640523": {
"ott_len": 35,
"ott_percent": 78,
"ott_bw_up": 138,
"tps_qty_index": 64,
"max_risk_long": 52
},
"0590821210640523": {
"ott_len": 59,
"ott_percent": 82,
"ott_bw_up": 121,
"tps_qty_index": 64,
"max_risk_long": 52
},
"0660781110370653": {
"ott_len": 66,
"ott_percent": 78,
"ott_bw_up": 111,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0631291110100523": {
"ott_len": 63,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 10,
"max_risk_long": 52
},
"0701701110370653": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 111,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0331411470590593": {
"ott_len": 33,
"ott_percent": 141,
"ott_bw_up": 147,
"tps_qty_index": 59,
"max_risk_long": 59
},
"0331291080590793": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 108,
"tps_qty_index": 59,
"max_risk_long": 79
},
"0311411471100543": {
"ott_len": 31,
"ott_percent": 141,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 54
},
"0311291360170653": {
"ott_len": 31,
"ott_percent": 129,
"ott_bw_up": 136,
"tps_qty_index": 17,
"max_risk_long": 65
},
"0331291490980613": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 98,
"max_risk_long": 61
},
"0321031490840673": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 84,
"max_risk_long": 67
},
"0701701400650653": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 140,
"tps_qty_index": 65,
"max_risk_long": 65
},
"0321031491100913": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 91
},
"0321031240840913": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 124,
"tps_qty_index": 84,
"max_risk_long": 91
},
"0701291160160823": {
"ott_len": 70,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 16,
"max_risk_long": 82
},
"0681701340690523": {
"ott_len": 68,
"ott_percent": 170,
"ott_bw_up": 134,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0731291110650813": {
"ott_len": 73,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 65,
"max_risk_long": 81
},
"0330781470860523": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 86,
"max_risk_long": 52
},
"0331031491100493": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 49
},
"0311031471070553": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 107,
"max_risk_long": 55
},
"0331701710590653": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 171,
"tps_qty_index": 59,
"max_risk_long": 65
},
"0701291160930653": {
"ott_len": 70,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 93,
"max_risk_long": 65
},
"0331291490980913": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 98,
"max_risk_long": 91
},
"0331291710790653": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 171,
"tps_qty_index": 79,
"max_risk_long": 65
},
"0312021470370653": {
"ott_len": 31,
"ott_percent": 202,
"ott_bw_up": 147,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0330781470840673": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 84,
"max_risk_long": 67
},
"0331121490940803": {
"ott_len": 33,
"ott_percent": 112,
"ott_bw_up": 149,
"tps_qty_index": 94,
"max_risk_long": 80
},
"0331701380700523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 70,
"max_risk_long": 52
},
"0311571490980603": {
"ott_len": 31,
"ott_percent": 157,
"ott_bw_up": 149,
"tps_qty_index": 98,
"max_risk_long": 60
},
"40788117652": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 81,
"tps_qty_index": 17,
"max_risk_long": 65
},
"0480781390650613": {
"ott_len": 48,
"ott_percent": 78,
"ott_bw_up": 139,
"tps_qty_index": 65,
"max_risk_long": 61
},
"0321031470840773": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 84,
"max_risk_long": 77
},
"0311330940420553": {
"ott_len": 31,
"ott_percent": 133,
"ott_bw_up": 94,
"tps_qty_index": 42,
"max_risk_long": 55
},
"0331291490690523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0681291350650663": {
"ott_len": 68,
"ott_percent": 129,
"ott_bw_up": 135,
"tps_qty_index": 65,
"max_risk_long": 66
},
"0331761490860523": {
"ott_len": 33,
"ott_percent": 176,
"ott_bw_up": 149,
"tps_qty_index": 86,
"max_risk_long": 52
},
"0371291110420653": {
"ott_len": 37,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 42,
"max_risk_long": 65
},
"0480781470100613": {
"ott_len": 48,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 10,
"max_risk_long": 61
},
"0331291450690523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 145,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0330911470840853": {
"ott_len": 33,
"ott_percent": 91,
"ott_bw_up": 147,
"tps_qty_index": 84,
"max_risk_long": 85
},
"0400781040690653": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 104,
"tps_qty_index": 69,
"max_risk_long": 65
},
"0441031110840693": {
"ott_len": 44,
"ott_percent": 103,
"ott_bw_up": 111,
"tps_qty_index": 84,
"max_risk_long": 69
},
"0330781470770673": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 77,
"max_risk_long": 67
},
"0661291100370653": {
"ott_len": 66,
"ott_percent": 129,
"ott_bw_up": 110,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0701701380930653": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 93,
"max_risk_long": 65
},
"0331291110560823": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 56,
"max_risk_long": 82
},
"0501030920420453": {
"ott_len": 50,
"ott_percent": 103,
"ott_bw_up": 92,
"tps_qty_index": 42,
"max_risk_long": 45
},
"0331291490840503": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 84,
"max_risk_long": 50
},
"0331701340690523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 134,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0331031471100493": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 49
},
"68788818492": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 18,
"max_risk_long": 49
},
"0311031471100553": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0331701380640523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 64,
"max_risk_long": 52
},
"0701701380570523": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 57,
"max_risk_long": 52
},
"0621291110650913": {
"ott_len": 62,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 65,
"max_risk_long": 91
},
"0350781340640523": {
"ott_len": 35,
"ott_percent": 78,
"ott_bw_up": 134,
"tps_qty_index": 64,
"max_risk_long": 52
},
"0311291471100653": {
"ott_len": 31,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 65
},
"0311701350380933": {
"ott_len": 31,
"ott_percent": 170,
"ott_bw_up": 135,
"tps_qty_index": 38,
"max_risk_long": 93
},
"0311030880720553": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 88,
"tps_qty_index": 72,
"max_risk_long": 55
},
"0331701490640523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 64,
"max_risk_long": 52
},
"66788884672": {
"ott_len": 66,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 84,
"max_risk_long": 67
},
"0331291050940523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 105,
"tps_qty_index": 94,
"max_risk_long": 52
},
"0400781470690653": {
"ott_len": 40,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 69,
"max_risk_long": 65
},
"0321031481100913": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 148,
"tps_qty_index": 110,
"max_risk_long": 91
},
"0311031470420653": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 42,
"max_risk_long": 65
},
"0331291080420793": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 108,
"tps_qty_index": 42,
"max_risk_long": 79
},
"0331291470690523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0331291490110653": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 11,
"max_risk_long": 65
},
"0701701380570653": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 57,
"max_risk_long": 65
},
"0331561340640863": {
"ott_len": 33,
"ott_percent": 156,
"ott_bw_up": 134,
"tps_qty_index": 64,
"max_risk_long": 86
},
"0330781490650503": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 65,
"max_risk_long": 50
},
"0311030880400553": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 88,
"tps_qty_index": 40,
"max_risk_long": 55
},
"0330781470690493": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 69,
"max_risk_long": 49
},
"0321031490980913": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 98,
"max_risk_long": 91
},
"0321031491100523": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0331701490590653": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 59,
"max_risk_long": 65
},
"0331291490770553": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 77,
"max_risk_long": 55
},
"0291291160930583": {
"ott_len": 29,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 93,
"max_risk_long": 58
},
"0341291471100663": {
"ott_len": 34,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 66
},
"0701701380370653": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 37,
"max_risk_long": 65
},
"0331291491100613": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 61
},
"33788842652": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 42,
"max_risk_long": 65
},
"0331291490410673": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 41,
"max_risk_long": 67
},
"0331291490590653": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 59,
"max_risk_long": 65
},
"70788872532": {
"ott_len": 70,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 72,
"max_risk_long": 53
},
"0701701380840653": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 84,
"max_risk_long": 65
},
"39788977342": {
"ott_len": 39,
"ott_percent": 78,
"ott_bw_up": 89,
"tps_qty_index": 77,
"max_risk_long": 34
},
"68788856442": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 56,
"max_risk_long": 44
},
"0331271710570653": {
"ott_len": 33,
"ott_percent": 127,
"ott_bw_up": 171,
"tps_qty_index": 57,
"max_risk_long": 65
},
"68788872652": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 72,
"max_risk_long": 65
},
"0331531490660793": {
"ott_len": 33,
"ott_percent": 153,
"ott_bw_up": 149,
"tps_qty_index": 66,
"max_risk_long": 79
},
"0331291470650453": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 65,
"max_risk_long": 45
},
"0330781471040523": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 104,
"max_risk_long": 52
},
"0701291161100653": {
"ott_len": 70,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 110,
"max_risk_long": 65
},
"0310781471100523": {
"ott_len": 31,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 52
},
"33788841792": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 41,
"max_risk_long": 79
},
"0321291160930523": {
"ott_len": 32,
"ott_percent": 129,
"ott_bw_up": 116,
"tps_qty_index": 93,
"max_risk_long": 52
},
"0331291490110503": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 11,
"max_risk_long": 50
},
"0331291071100613": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 107,
"tps_qty_index": 110,
"max_risk_long": 61
},
"0701701381100523": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0680781090180743": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 109,
"tps_qty_index": 18,
"max_risk_long": 74
},
"0651291490650523": {
"ott_len": 65,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 65,
"max_risk_long": 52
},
"0331701491100883": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 88
},
"0330781490640663": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 64,
"max_risk_long": 66
},
"0701701380840573": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 84,
"max_risk_long": 57
},
"0331291471100663": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 66
},
"68788818982": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 18,
"max_risk_long": 98
},
"0331291710770653": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 171,
"tps_qty_index": 77,
"max_risk_long": 65
},
"0331701710110653": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 171,
"tps_qty_index": 11,
"max_risk_long": 65
},
"0310941471100553": {
"ott_len": 31,
"ott_percent": 94,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0321031470770553": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 77,
"max_risk_long": 55
},
"0321031491060503": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 106,
"max_risk_long": 50
},
"0680780881100493": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 110,
"max_risk_long": 49
},
"0331291491100793": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 79
},
"0331031471060643": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 106,
"max_risk_long": 64
},
"0331291330650943": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 133,
"tps_qty_index": 65,
"max_risk_long": 94
},
"0560781380350573": {
"ott_len": 56,
"ott_percent": 78,
"ott_bw_up": 138,
"tps_qty_index": 35,
"max_risk_long": 57
},
"0331291710590523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 171,
"tps_qty_index": 59,
"max_risk_long": 52
},
"0331291471100523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0320781161100523": {
"ott_len": 32,
"ott_percent": 78,
"ott_bw_up": 116,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0701701380320523": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 32,
"max_risk_long": 52
},
"0331031491100523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0331701710640633": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 171,
"tps_qty_index": 64,
"max_risk_long": 63
},
"0331501550690523": {
"ott_len": 33,
"ott_percent": 150,
"ott_bw_up": 155,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0331291380840503": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 138,
"tps_qty_index": 84,
"max_risk_long": 50
},
"0701291380570763": {
"ott_len": 70,
"ott_percent": 129,
"ott_bw_up": 138,
"tps_qty_index": 57,
"max_risk_long": 76
},
"0331291490570523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 57,
"max_risk_long": 52
},
"0331291491160523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 116,
"max_risk_long": 52
},
"68788825492": {
"ott_len": 68,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 25,
"max_risk_long": 49
},
"0330781340640553": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 134,
"tps_qty_index": 64,
"max_risk_long": 55
},
"0331291710770463": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 171,
"tps_qty_index": 77,
"max_risk_long": 46
},
"0341291490840663": {
"ott_len": 34,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 84,
"max_risk_long": 66
},
"0331291490650663": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 65,
"max_risk_long": 66
},
"0321291710980523": {
"ott_len": 32,
"ott_percent": 129,
"ott_bw_up": 171,
"tps_qty_index": 98,
"max_risk_long": 52
},
"0451291490590523": {
"ott_len": 45,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 59,
"max_risk_long": 52
},
"0331291490840523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 84,
"max_risk_long": 52
},
"0331031490590653": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 59,
"max_risk_long": 65
},
"0301471471100553": {
"ott_len": 30,
"ott_percent": 147,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0330941470920553": {
"ott_len": 33,
"ott_percent": 94,
"ott_bw_up": 147,
"tps_qty_index": 92,
"max_risk_long": 55
},
"0311031390420563": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 139,
"tps_qty_index": 42,
"max_risk_long": 56
},
"0340861470860523": {
"ott_len": 34,
"ott_percent": 86,
"ott_bw_up": 147,
"tps_qty_index": 86,
"max_risk_long": 52
},
"0451291490770553": {
"ott_len": 45,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 77,
"max_risk_long": 55
},
"0331291490590913": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 59,
"max_risk_long": 91
},
"0331291471040773": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 104,
"max_risk_long": 77
},
"0331031160930523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 116,
"tps_qty_index": 93,
"max_risk_long": 52
},
"0321031491100503": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 50
},
"70788869522": {
"ott_len": 70,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 69,
"max_risk_long": 52
},
"0331701560110653": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 156,
"tps_qty_index": 11,
"max_risk_long": 65
},
"0320781481040913": {
"ott_len": 32,
"ott_percent": 78,
"ott_bw_up": 148,
"tps_qty_index": 104,
"max_risk_long": 91
},
"0330781471100523": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0331291491100523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0310940951100553": {
"ott_len": 31,
"ott_percent": 94,
"ott_bw_up": 95,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0331291490770523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 77,
"max_risk_long": 52
},
"0331031470990523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 99,
"max_risk_long": 52
},
"0331291490750553": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 75,
"max_risk_long": 55
},
"0310941491060553": {
"ott_len": 31,
"ott_percent": 94,
"ott_bw_up": 149,
"tps_qty_index": 106,
"max_risk_long": 55
},
"0331031250590653": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 125,
"tps_qty_index": 59,
"max_risk_long": 65
},
"0321031491060523": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 106,
"max_risk_long": 52
},
"0321031471100913": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 91
},
"0331291390590523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 139,
"tps_qty_index": 59,
"max_risk_long": 52
},
"0331031490990523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 99,
"max_risk_long": 52
},
"0331701490570523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 57,
"max_risk_long": 52
},
"0331030930990523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 93,
"tps_qty_index": 99,
"max_risk_long": 52
},
"0331291490920553": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 92,
"max_risk_long": 55
},
"0330781490840673": {
"ott_len": 33,
"ott_percent": 78,
"ott_bw_up": 149,
"tps_qty_index": 84,
"max_risk_long": 67
},
"70788893512": {
"ott_len": 70,
"ott_percent": 78,
"ott_bw_up": 88,
"tps_qty_index": 93,
"max_risk_long": 51
},
"0331291490750463": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 75,
"max_risk_long": 46
},
"0331031471030523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 103,
"max_risk_long": 52
},
"0331701490770653": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 77,
"max_risk_long": 65
},
"0331291490320523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 32,
"max_risk_long": 52
},
"0331031470860523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 86,
"max_risk_long": 52
},
"0331031470500493": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 50,
"max_risk_long": 49
},
"0331481490610653": {
"ott_len": 33,
"ott_percent": 148,
"ott_bw_up": 149,
"tps_qty_index": 61,
"max_risk_long": 65
},
"0331031471040493": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 104,
"max_risk_long": 49
},
"0331701440590523": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 144,
"tps_qty_index": 59,
"max_risk_long": 52
},
"0611291380590583": {
"ott_len": 61,
"ott_percent": 129,
"ott_bw_up": 138,
"tps_qty_index": 59,
"max_risk_long": 58
},
"0701030980120503": {
"ott_len": 70,
"ott_percent": 103,
"ott_bw_up": 98,
"tps_qty_index": 12,
"max_risk_long": 50
},
"0331291490930653": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 93,
"max_risk_long": 65
},
"0320781481100493": {
"ott_len": 32,
"ott_percent": 78,
"ott_bw_up": 148,
"tps_qty_index": 110,
"max_risk_long": 49
},
"0321701310840573": {
"ott_len": 32,
"ott_percent": 170,
"ott_bw_up": 131,
"tps_qty_index": 84,
"max_risk_long": 57
},
"0331701380590653": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 59,
"max_risk_long": 65
},
"0310941380570523": {
"ott_len": 31,
"ott_percent": 94,
"ott_bw_up": 138,
"tps_qty_index": 57,
"max_risk_long": 52
},
"0331701490110553": {
"ott_len": 33,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 11,
"max_risk_long": 55
},
"0331291121060523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 112,
"tps_qty_index": 106,
"max_risk_long": 52
},
"0331031471100523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0701701380670503": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 67,
"max_risk_long": 50
},
"0701701381100913": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 138,
"tps_qty_index": 110,
"max_risk_long": 91
},
"0331031180590453": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 118,
"tps_qty_index": 59,
"max_risk_long": 45
},
"0331291470920553": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 147,
"tps_qty_index": 92,
"max_risk_long": 55
},
"0331031490680493": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 68,
"max_risk_long": 49
},
"0310941471120553": {
"ott_len": 31,
"ott_percent": 94,
"ott_bw_up": 147,
"tps_qty_index": 112,
"max_risk_long": 55
},
"0331291490770663": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 77,
"max_risk_long": 66
},
"0361701470570523": {
"ott_len": 36,
"ott_percent": 170,
"ott_bw_up": 147,
"tps_qty_index": 57,
"max_risk_long": 52
},
"0310781291100523": {
"ott_len": 31,
"ott_percent": 78,
"ott_bw_up": 129,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0310941471100523": {
"ott_len": 31,
"ott_percent": 94,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 52
},
"0331031470510523": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 51,
"max_risk_long": 52
},
"0701701490770553": {
"ott_len": 70,
"ott_percent": 170,
"ott_bw_up": 149,
"tps_qty_index": 77,
"max_risk_long": 55
},
"0331031471100553": {
"ott_len": 33,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0331291180570523": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 118,
"tps_qty_index": 57,
"max_risk_long": 52
},
"0311031490750553": {
"ott_len": 31,
"ott_percent": 103,
"ott_bw_up": 149,
"tps_qty_index": 75,
"max_risk_long": 55
},
"0321031471100553": {
"ott_len": 32,
"ott_percent": 103,
"ott_bw_up": 147,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0331291110130453": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 111,
"tps_qty_index": 13,
"max_risk_long": 45
},
"0331291481100553": {
"ott_len": 33,
"ott_percent": 129,
"ott_bw_up": 148,
"tps_qty_index": 110,
"max_risk_long": 55
},
"0321291490930913": {
"ott_len": 32,
"ott_percent": 129,
"ott_bw_up": 149,
"tps_qty_index": 93,
"max_risk_long": 91
}
}
| 18.613921
| 23
| 0.616512
| 5,416
| 37,172
| 3.810377
| 0.079579
| 0.082861
| 0.096671
| 0.062364
| 0.741096
| 0.719388
| 0.708291
| 0.663275
| 0.605417
| 0.545622
| 0
| 0.259919
| 0.191757
| 37,172
| 1,997
| 24
| 18.613921
| 0.426974
| 0
| 0
| 0.661492
| 0
| 0
| 0.525919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
968b7a148074e2185b54bca540a447d882f200c3
| 278
|
py
|
Python
|
tests/test_route_legs.py
|
jsbeckwith/unweaver
|
a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0
|
[
"Apache-2.0"
] | 4
|
2019-04-24T16:38:57.000Z
|
2021-12-28T20:38:08.000Z
|
tests/test_route_legs.py
|
jsbeckwith/unweaver
|
a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0
|
[
"Apache-2.0"
] | 3
|
2021-06-02T04:06:33.000Z
|
2021-11-02T01:47:20.000Z
|
tests/test_route_legs.py
|
jsbeckwith/unweaver
|
a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0
|
[
"Apache-2.0"
] | 1
|
2020-08-13T04:42:05.000Z
|
2020-08-13T04:42:05.000Z
|
from unweaver.algorithms.shortest_path import route_legs
from .constants import cost_fun
def test_route_legs(built_G, test_waypoint_legs):
    # Smoke test: route_legs should complete without raising on the fixture
    # graph and waypoint legs. No assertions yet — the unpacked results are
    # currently unchecked.
    # NOTE(review): takes ~4 seconds per the original author; profile to
    # find the hot spot before adding more cases.
    cost, path, route = route_legs(built_G, test_waypoint_legs, cost_fun)
| 30.888889
| 73
| 0.78777
| 45
| 278
| 4.577778
| 0.555556
| 0.131068
| 0.135922
| 0.145631
| 0.300971
| 0.300971
| 0.300971
| 0
| 0
| 0
| 0
| 0.004219
| 0.147482
| 278
| 8
| 74
| 34.75
| 0.864979
| 0.197842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
969dc72c02e657a049debfb2bbbcb280601ecdaf
| 147
|
py
|
Python
|
pix/examples/__init__.py
|
sambvfx/pix
|
6fbdd78d8a02dd6a0d21fa741c739831f01def98
|
[
"MIT"
] | 13
|
2016-12-01T00:35:44.000Z
|
2022-03-07T04:02:42.000Z
|
pix/examples/__init__.py
|
ninoNinkovic/pix
|
6fbdd78d8a02dd6a0d21fa741c739831f01def98
|
[
"MIT"
] | 3
|
2017-04-27T21:24:48.000Z
|
2019-05-14T01:10:06.000Z
|
pix/examples/__init__.py
|
ninoNinkovic/pix
|
6fbdd78d8a02dd6a0d21fa741c739831f01def98
|
[
"MIT"
] | 3
|
2016-12-07T20:58:38.000Z
|
2018-09-05T18:37:21.000Z
|
"""
A variety of examples to showcase usage of pix.
Note that some libraries or other third-party resources may be required to run
an example.
"""
| 24.5
| 77
| 0.768707
| 24
| 147
| 4.708333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176871
| 147
| 6
| 78
| 24.5
| 0.933884
| 0.945578
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
96b2e6e7329fab3f3d551b7e818e17a2b211f8e3
| 185
|
py
|
Python
|
cyder/search/management/commands/compile_test.py
|
ngokevin/chili
|
36c354ac567471d5e36dccf9eea5096c6b02d4b9
|
[
"BSD-3-Clause"
] | 2
|
2019-03-16T00:47:09.000Z
|
2022-03-04T14:39:08.000Z
|
cyder/search/management/commands/compile_test.py
|
ngokevin/chili
|
36c354ac567471d5e36dccf9eea5096c6b02d4b9
|
[
"BSD-3-Clause"
] | 1
|
2020-04-24T08:24:55.000Z
|
2020-04-24T08:24:55.000Z
|
cyder/search/management/commands/compile_test.py
|
ngokevin/chili
|
36c354ac567471d5e36dccf9eea5096c6b02d4b9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from search.compiler import invparse
class Command(BaseCommand):
    # Django management-command stub: handle() is a deliberate no-op.
    # NOTE(review): the `invparse` import above suggests compile logic was
    # planned here but never implemented — confirm before deleting.
    def handle(self, *args, **options):
        pass
| 26.428571
| 65
| 0.756757
| 22
| 185
| 6.363636
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156757
| 185
| 6
| 66
| 30.833333
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
7385665f28f4a284732d2e6e4c1a488033550b03
| 90
|
py
|
Python
|
learn-python/variable/boolean/in_intro.py
|
Moazzam125/learn-python
|
a0a92a5f4d1a031d0f66a7d10682c1844b1da80d
|
[
"MIT"
] | 2
|
2020-12-25T06:42:13.000Z
|
2020-12-25T10:25:55.000Z
|
learn-python/variable/boolean/in_intro.py
|
Moazzam125/learn-python
|
a0a92a5f4d1a031d0f66a7d10682c1844b1da80d
|
[
"MIT"
] | null | null | null |
learn-python/variable/boolean/in_intro.py
|
Moazzam125/learn-python
|
a0a92a5f4d1a031d0f66a7d10682c1844b1da80d
|
[
"MIT"
] | 2
|
2021-12-27T06:15:40.000Z
|
2022-01-05T15:08:29.000Z
|
'''Demonstrate the `in` membership operator against a list.'''
sample_in = list(('inside', 'list'))
print('inside' in sample_in)
| 22.5
| 30
| 0.666667
| 12
| 90
| 4.833333
| 0.583333
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 90
| 4
| 31
| 22.5
| 0.74359
| 0.233333
| 0
| 0
| 0
| 0
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
73bceab804b48e99b767b6439e693e20a336ccec
| 194
|
py
|
Python
|
path4gmns/__init__.py
|
FangTang999/Path4GMNS
|
d319bb4b97a51055c1917820d1f5eaf7b8032a51
|
[
"Apache-2.0"
] | null | null | null |
path4gmns/__init__.py
|
FangTang999/Path4GMNS
|
d319bb4b97a51055c1917820d1f5eaf7b8032a51
|
[
"Apache-2.0"
] | null | null | null |
path4gmns/__init__.py
|
FangTang999/Path4GMNS
|
d319bb4b97a51055c1917820d1f5eaf7b8032a51
|
[
"Apache-2.0"
] | null | null | null |
from .accessibility import *
from .colgen import *
from .dtaapi import *
from .utils import *
# Package version string.
__version__ = '0.7.2'
# Announce the version every time the package is imported.
print(f'path4gmns, version {__version__}')
| 17.636364
| 42
| 0.731959
| 26
| 194
| 5.153846
| 0.615385
| 0.223881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02454
| 0.159794
| 194
| 11
| 42
| 17.636364
| 0.797546
| 0.149485
| 0
| 0
| 0
| 0
| 0.22561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.166667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
73c63adaa92e5369bbe14b84cd94a4b99a9ce04b
| 775
|
py
|
Python
|
Queue_class.py
|
Chaytali/Python
|
a5dbb537078747283850e69637d2994b267f0a3c
|
[
"bzip2-1.0.6"
] | null | null | null |
Queue_class.py
|
Chaytali/Python
|
a5dbb537078747283850e69637d2994b267f0a3c
|
[
"bzip2-1.0.6"
] | null | null | null |
Queue_class.py
|
Chaytali/Python
|
a5dbb537078747283850e69637d2994b267f0a3c
|
[
"bzip2-1.0.6"
] | null | null | null |
class Queue:
    """FIFO queue backed by a plain list; duplicate items are rejected.

    The newest item lives at index 0 and the oldest at the end of the
    list, so dequeue pops from the tail.
    """

    def __init__(self):
        # Underlying storage; exposed directly via printQueue().
        self.queue = list()

    def enqueue(self, data):
        """Insert data at the front unless already queued; report success."""
        if data in self.queue:
            return False
        self.queue.insert(0, data)
        return True

    def dequeue(self):
        """Remove and return the oldest item, or a message when empty."""
        if not self.queue:
            return ("Queue Empty!")
        return self.queue.pop()

    def size(self):
        """Return the number of queued items."""
        return len(self.queue)

    def printQueue(self):
        """Return the raw backing list (newest first)."""
        return self.queue
# Demo driver: exercise every Queue method and print each result.
myQueue = Queue()
# Enqueue five values (the second 5 is a duplicate and prints False).
for item in (5, 6, 9, 5, 3):
    print(myQueue.enqueue(item))
print(myQueue.size())
# Drain the four distinct items in FIFO order.
for _ in range(4):
    print(myQueue.dequeue())
print(myQueue.size())
# One extra dequeue to show the empty-queue message.
print(myQueue.dequeue())
| 18.452381
| 35
| 0.619355
| 98
| 775
| 4.857143
| 0.285714
| 0.302521
| 0.19958
| 0.201681
| 0.430672
| 0.430672
| 0.34874
| 0.184874
| 0.184874
| 0.184874
| 0
| 0.011885
| 0.24
| 775
| 41
| 36
| 18.902439
| 0.796265
| 0
| 0
| 0.310345
| 0
| 0
| 0.015484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0
| 0.068966
| 0.413793
| 0.448276
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
73dc77f94297a4ff36e4db5e3e360f691189f6f8
| 110
|
py
|
Python
|
robosuite/models/__init__.py
|
kyungjaelee/robosuite
|
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
|
[
"MIT"
] | 397
|
2020-09-28T02:49:58.000Z
|
2022-03-30T18:08:19.000Z
|
robosuite/models/__init__.py
|
kyungjaelee/robosuite
|
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
|
[
"MIT"
] | 169
|
2020-09-28T02:17:59.000Z
|
2022-03-29T13:32:43.000Z
|
robosuite/models/__init__.py
|
kyungjaelee/robosuite
|
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
|
[
"MIT"
] | 131
|
2020-09-28T14:50:35.000Z
|
2022-03-31T02:27:33.000Z
|
import os
from .world import MujocoWorldBase
# Absolute path to the "assets" directory that ships next to this module.
assets_root = os.path.join(os.path.dirname(__file__), "assets")
| 22
| 63
| 0.781818
| 16
| 110
| 5.0625
| 0.6875
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 110
| 4
| 64
| 27.5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
73f3086bf3dbf6a78a55ee4582870e63f985104f
| 98
|
py
|
Python
|
maniacal-moths/newsly/news_wrapper/apps.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | null | null | null |
maniacal-moths/newsly/news_wrapper/apps.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | null | null | null |
maniacal-moths/newsly/news_wrapper/apps.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | 1
|
2020-08-04T05:44:34.000Z
|
2020-08-04T05:44:34.000Z
|
from django.apps import AppConfig
class NewsWrapperConfig(AppConfig):
    # Django application registry entry for the news_wrapper app.
    name = 'news_wrapper'
| 16.333333
| 35
| 0.77551
| 11
| 98
| 6.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 5
| 36
| 19.6
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
fb76668f59ddda3a2169b235f46f9de04dc0a0b9
| 179
|
py
|
Python
|
server.py
|
mohaijiang/hello-python
|
b1f52fded6d68a685049347f3d6ef95c3f891934
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
mohaijiang/hello-python
|
b1f52fded6d68a685049347f3d6ef95c3f891934
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
mohaijiang/hello-python
|
b1f52fded6d68a685049347f3d6ef95c3f891934
|
[
"Apache-2.0"
] | null | null | null |
from wsgiref.simple_server import make_server
from hello import application
# Serve the WSGI `application` on every interface, port 8080.
httpd = make_server('', 8080, application)
# Fixed: the original used the Python 2 `print` statement, which is a
# SyntaxError on Python 3. The parenthesized call below behaves identically
# on Python 2 and 3.
print("Serving HTTP on port 8080...")
httpd.serve_forever()  # blocks, handling requests until the process exits
| 25.571429
| 45
| 0.793296
| 25
| 179
| 5.52
| 0.68
| 0.144928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 0.117318
| 179
| 6
| 46
| 29.833333
| 0.822785
| 0
| 0
| 0
| 0
| 0
| 0.156425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fb8ba578e75d3557a7e5eb4b222067a684b35d3d
| 61
|
py
|
Python
|
package_eg_test.py
|
theintthandarnaing/python_exercises
|
86d4ca637e01a9819cfaeb55ff48c04d1cb074db
|
[
"MIT"
] | null | null | null |
package_eg_test.py
|
theintthandarnaing/python_exercises
|
86d4ca637e01a9819cfaeb55ff48c04d1cb074db
|
[
"MIT"
] | null | null | null |
package_eg_test.py
|
theintthandarnaing/python_exercises
|
86d4ca637e01a9819cfaeb55ff48c04d1cb074db
|
[
"MIT"
] | null | null | null |
# Exercise the convert() helper from the package_example package.
import package_example.ex41

# Fixed: the original called `package_example2.ex41.convert()`, a NameError —
# no `package_example2` is ever imported. The imported name is `package_example`.
package_example.ex41.convert()
| 15.25
| 31
| 0.852459
| 8
| 61
| 6.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 0.065574
| 61
| 3
| 32
| 20.333333
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
fb96e8c8a33784ea9235d2170a651a12cf9ab50f
| 1,035
|
py
|
Python
|
aiotdlib/api/functions/get_recovery_email_address.py
|
jraylan/aiotdlib
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
[
"MIT"
] | 37
|
2021-05-04T10:41:41.000Z
|
2022-03-30T13:48:05.000Z
|
aiotdlib/api/functions/get_recovery_email_address.py
|
jraylan/aiotdlib
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
[
"MIT"
] | 13
|
2021-07-17T19:54:51.000Z
|
2022-02-26T06:50:00.000Z
|
aiotdlib/api/functions/get_recovery_email_address.py
|
jraylan/aiotdlib
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
[
"MIT"
] | 7
|
2021-09-22T21:27:11.000Z
|
2022-02-20T02:33:19.000Z
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class GetRecoveryEmailAddress(BaseObject):
    """
    Returns a 2-step verification recovery email address that was previously set up. This method can be used to verify a password provided by the user
    
    :param password: The password for the current user
    :type password: :class:`str`
    
    """
    # TDLib type discriminator; serialized under the "@type" key via the alias.
    ID: str = Field("getRecoveryEmailAddress", alias="@type")
    # Current user's password, required by the API call.
    password: str

    @staticmethod
    def read(q: dict) -> GetRecoveryEmailAddress:
        # construct() bypasses pydantic validation — q is presumably a raw,
        # already-well-formed TDLib response dict (TODO confirm).
        return GetRecoveryEmailAddress.construct(**q)
| 36.964286
| 150
| 0.48599
| 83
| 1,035
| 6
| 0.710843
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001391
| 0.305314
| 1,035
| 27
| 151
| 38.333333
| 0.691238
| 0.610628
| 0
| 0
| 1
| 0
| 0.077778
| 0.063889
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.111111
| 0.333333
| 0.111111
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
|
0
| 4
|
836f5319bee48a7a8bd9c98c32d9d74c34076a5b
| 204
|
py
|
Python
|
PoseEstimation/Script/Main/Modules/setup.py
|
AtsushiHashimoto/KinectOnTheCeiling
|
116448e706da8b4e87e5402310747f46821beb4a
|
[
"MIT"
] | null | null | null |
PoseEstimation/Script/Main/Modules/setup.py
|
AtsushiHashimoto/KinectOnTheCeiling
|
116448e706da8b4e87e5402310747f46821beb4a
|
[
"MIT"
] | null | null | null |
PoseEstimation/Script/Main/Modules/setup.py
|
AtsushiHashimoto/KinectOnTheCeiling
|
116448e706da8b4e87e5402310747f46821beb4a
|
[
"MIT"
] | null | null | null |
import numpy
from distutils.core import setup
from Cython.Build import cythonize
setup(
name='features_labels',
ext_modules=cythonize('features_labels.pyx', include_dirs=[numpy.get_include()])
)
| 22.666667
| 84
| 0.779412
| 27
| 204
| 5.703704
| 0.666667
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 204
| 8
| 85
| 25.5
| 0.855556
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.428571
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
837a770a55ce4b78d9ca5b50b38670c17b49502e
| 70
|
py
|
Python
|
salt/transport/table/handshake/__init__.py
|
pille/salt
|
47322575309faac8c4755287d930469caffc1c65
|
[
"Apache-2.0"
] | 1
|
2019-06-27T13:03:07.000Z
|
2019-06-27T13:03:07.000Z
|
salt/transport/table/handshake/__init__.py
|
pille/salt
|
47322575309faac8c4755287d930469caffc1c65
|
[
"Apache-2.0"
] | null | null | null |
salt/transport/table/handshake/__init__.py
|
pille/salt
|
47322575309faac8c4755287d930469caffc1c65
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Package containing network handshakes
'''
| 14
| 37
| 0.628571
| 7
| 70
| 6.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.157143
| 70
| 4
| 38
| 17.5
| 0.728814
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
83985f718420ec71d0cafd6ad060360995ca22b2
| 152
|
py
|
Python
|
unstamp/mail_submission_server.py
|
fallingduck/unstamp
|
de2d1520ad7fea12a3dd3bfda4d5651a0ba27c59
|
[
"0BSD"
] | null | null | null |
unstamp/mail_submission_server.py
|
fallingduck/unstamp
|
de2d1520ad7fea12a3dd3bfda4d5651a0ba27c59
|
[
"0BSD"
] | null | null | null |
unstamp/mail_submission_server.py
|
fallingduck/unstamp
|
de2d1520ad7fea12a3dd3bfda4d5651a0ba27c59
|
[
"0BSD"
] | null | null | null |
'''Unstamp Mail Submission Agent Server
This server receives outgoing mail from the email client, and gives it to the
Mail Transfer Agent to send.
'''
| 25.333333
| 77
| 0.776316
| 24
| 152
| 4.916667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171053
| 152
| 5
| 78
| 30.4
| 0.936508
| 0.947368
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
83a5b5f4a929828c60799584df23cd33eb1386a6
| 991
|
py
|
Python
|
tests/test_replay_traffic.py
|
smaato/biggraphite
|
edf2c6e56505806c122196745de149cd6f53b453
|
[
"Apache-2.0"
] | null | null | null |
tests/test_replay_traffic.py
|
smaato/biggraphite
|
edf2c6e56505806c122196745de149cd6f53b453
|
[
"Apache-2.0"
] | null | null | null |
tests/test_replay_traffic.py
|
smaato/biggraphite
|
edf2c6e56505806c122196745de149cd6f53b453
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
# pylama:ignore=W0611
# No tests, at least check that the syntax is valid.
# TODO: bundle a small pcap file and test that we can parse
# it (also add a --dry_run).
from biggraphite.cli import replay_traffic
class TestReplayTraffic(unittest.TestCase):
    # Import-only smoke test: merely loading this module (and the
    # `replay_traffic` import above) is the whole check — see the file's
    # top comments about a future pcap-based test.
    def test_import(self):
        pass
# Allow running this test file directly (outside a pytest/unittest runner).
if __name__ == "__main__":
    unittest.main()
| 29.147059
| 74
| 0.750757
| 151
| 991
| 4.821192
| 0.715232
| 0.082418
| 0.035714
| 0.043956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.176589
| 991
| 33
| 75
| 30.030303
| 0.877451
| 0.726539
| 0
| 0
| 0
| 0
| 0.031621
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 1
| 0.125
| false
| 0.125
| 0.5
| 0
| 0.75
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
83c1d0384be106b6cc1117ef85026c26c19d3b01
| 190
|
py
|
Python
|
src/cmp/cool_lang/ast/string_node.py
|
codestrange/cool-compiler-2020
|
30508965d75a1a1d1362d0b51bef8da3978fd0c2
|
[
"MIT"
] | 3
|
2020-01-14T04:47:32.000Z
|
2020-09-10T17:57:20.000Z
|
src/cmp/cool_lang/ast/string_node.py
|
codestrange/cool-compiler-2020
|
30508965d75a1a1d1362d0b51bef8da3978fd0c2
|
[
"MIT"
] | 5
|
2020-01-14T06:06:35.000Z
|
2020-02-19T01:01:33.000Z
|
src/cmp/cool_lang/ast/string_node.py
|
codestrange/cool-compiler-2020
|
30508965d75a1a1d1362d0b51bef8da3978fd0c2
|
[
"MIT"
] | 3
|
2020-01-14T04:58:24.000Z
|
2020-01-14T16:23:41.000Z
|
from .atomic_node import AtomicNode
class StringNode(AtomicNode):
    """AST leaf for a string literal: keeps its token text and source position."""

    def __init__(self, token: str, line: int, column: int):
        # All storage is delegated to AtomicNode; zero-argument super() is
        # equivalent to super(StringNode, self) on Python 3.
        super().__init__(token, line, column)
| 27.142857
| 61
| 0.721053
| 24
| 190
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168421
| 190
| 6
| 62
| 31.666667
| 0.810127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
83c97e5a636c39a577f9249aa9985a8c35504f5f
| 22,185
|
py
|
Python
|
pacu/models/awsapi/cloudtrail.py
|
RyanJarv/Pacu2
|
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
|
[
"MIT"
] | 1
|
2022-03-09T14:51:54.000Z
|
2022-03-09T14:51:54.000Z
|
pacu/models/awsapi/cloudtrail.py
|
RyanJarv/Pacu2
|
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
|
[
"MIT"
] | null | null | null |
pacu/models/awsapi/cloudtrail.py
|
RyanJarv/Pacu2
|
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
|
[
"MIT"
] | null | null | null |
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:46:16+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Field
# Field-less response model: AddTags returns no payload on success.
class AddTagsResponse(BaseModel):
    """
    Returns the objects or data if successful. Otherwise, returns an error.
    """
    pass
# Generic error wrapper: a pydantic custom-root model carrying any payload.
# The many exception models below subclass it without adding fields, so each
# exists only to give the error a distinct type name.
class ResourceNotFoundException(BaseModel):
    __root__: Any
# --- CloudTrail error types -------------------------------------------------
# Each class below is a name-only alias of ResourceNotFoundException; the
# class name itself identifies the API error. Generated code — do not edit
# the definitions by hand (see the header at the top of this file).
class CloudTrailARNInvalidException(ResourceNotFoundException):
    pass
class ResourceTypeNotSupportedException(ResourceNotFoundException):
    pass
class TagsLimitExceededException(ResourceNotFoundException):
    pass
class InvalidTrailNameException(ResourceNotFoundException):
    pass
class InvalidTagParameterException(ResourceNotFoundException):
    pass
class UnsupportedOperationException(ResourceNotFoundException):
    pass
class OperationNotPermittedException(ResourceNotFoundException):
    pass
class NotOrganizationMasterAccountException(ResourceNotFoundException):
    pass
class MaximumNumberOfTrailsExceededException(ResourceNotFoundException):
    pass
class TrailAlreadyExistsException(ResourceNotFoundException):
    pass
class S3BucketDoesNotExistException(ResourceNotFoundException):
    pass
class InsufficientS3BucketPolicyException(ResourceNotFoundException):
    pass
class InsufficientSnsTopicPolicyException(ResourceNotFoundException):
    pass
class InsufficientEncryptionPolicyException(ResourceNotFoundException):
    pass
class InvalidS3BucketNameException(ResourceNotFoundException):
    pass
class InvalidS3PrefixException(ResourceNotFoundException):
    pass
class InvalidSnsTopicNameException(ResourceNotFoundException):
    pass
class InvalidKmsKeyIdException(ResourceNotFoundException):
    pass
class TrailNotProvidedException(ResourceNotFoundException):
    pass
class InvalidParameterCombinationException(ResourceNotFoundException):
    pass
class KmsKeyNotFoundException(ResourceNotFoundException):
    pass
class KmsKeyDisabledException(ResourceNotFoundException):
    pass
class KmsException(ResourceNotFoundException):
    pass
class InvalidCloudWatchLogsLogGroupArnException(ResourceNotFoundException):
    pass
class InvalidCloudWatchLogsRoleArnException(ResourceNotFoundException):
    pass
class CloudWatchLogsDeliveryUnavailableException(ResourceNotFoundException):
    pass
class CloudTrailAccessNotEnabledException(ResourceNotFoundException):
    pass
class InsufficientDependencyServiceAccessPermissionException(ResourceNotFoundException):
    pass
class OrganizationsNotInUseException(ResourceNotFoundException):
    pass
class OrganizationNotInAllFeaturesModeException(ResourceNotFoundException):
    pass
class CloudTrailInvalidClientTokenIdException(ResourceNotFoundException):
    pass
# Field-less success response (DeleteTrail returns no payload).
class DeleteTrailResponse(AddTagsResponse):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    pass
class TrailNotFoundException(ResourceNotFoundException):
    pass
class InvalidHomeRegionException(ResourceNotFoundException):
    pass
class ConflictException(ResourceNotFoundException):
    pass
class InsightNotEnabledException(ResourceNotFoundException):
    pass
class InvalidTimeRangeException(ResourceNotFoundException):
    pass
class InvalidTokenException(ResourceNotFoundException):
    pass
class InvalidLookupAttributesException(ResourceNotFoundException):
    pass
class InvalidMaxResultsException(ResourceNotFoundException):
    pass
class InvalidNextTokenException(ResourceNotFoundException):
    pass
class InvalidEventCategoryException(ResourceNotFoundException):
    pass
class InvalidEventSelectorsException(ResourceNotFoundException):
    pass
class InvalidInsightSelectorsException(ResourceNotFoundException):
    pass
# Field-less success responses for the tag/logging operations.
class RemoveTagsResponse(AddTagsResponse):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    pass
class StartLoggingResponse(AddTagsResponse):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    pass
class StopLoggingResponse(AddTagsResponse):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    pass
# --- Primitive wrapper types ------------------------------------------------
# Custom-root models wrapping scalars, with length/regex constraints where
# the OpenAPI schema specifies them.
class String(BaseModel):
    __root__: str
class SelectorName(BaseModel):
    __root__: Annotated[str, Field(max_length=1000, min_length=0, regex='.*')]
class SelectorField(BaseModel):
    __root__: Annotated[
        str, Field(max_length=1000, min_length=1, regex='[\\w|\\d|\\.|_]+')
    ]
class Boolean(BaseModel):
    __root__: bool
class ByteBuffer(String):
    pass
class DataResourceValues(BaseModel):
    __root__: List[String]
class DataResource(BaseModel):
"""
<p>The Amazon S3 buckets, Lambda functions, or Amazon DynamoDB tables that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.</p> <note> <p>The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.</p> <p>If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.</p> </note> <p>The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named <code>bucket-1</code>. In this example, the CloudTrail user specified an empty prefix, and the option to log both <code>Read</code> and <code>Write</code> data events.</p> <ol> <li> <p>A user uploads an image file to <code>bucket-1</code>.</p> </li> <li> <p>The <code>PutObject</code> API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.</p> </li> <li> <p>A user uploads an object to an Amazon S3 bucket named <code>arn:aws:s3:::bucket-2</code>.</p> </li> <li> <p>The <code>PutObject</code> API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. 
The trail doesn’t log the event.</p> </li> </ol> <p>The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named <i>MyLambdaFunction</i>, but not for all Lambda functions.</p> <ol> <li> <p>A user runs a script that includes a call to the <i>MyLambdaFunction</i> function and the <i>MyOtherLambdaFunction</i> function.</p> </li> <li> <p>The <code>Invoke</code> API operation on <i>MyLambdaFunction</i> is an Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for <i>MyLambdaFunction</i>, any invocations of that function are logged. The trail processes and logs the event.</p> </li> <li> <p>The <code>Invoke</code> API operation on <i>MyOtherLambdaFunction</i> is an Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the <code>Invoke</code> operation for <i>MyOtherLambdaFunction</i> does not match the function specified for the trail. The trail doesn’t log the event. </p> </li> </ol>
"""
Type: Optional[String] = None
Values: Optional[DataResourceValues] = None
class DataResources(BaseModel):
__root__: List[DataResource]
class Date(BaseModel):
__root__: datetime
class TrailNameList(DataResourceValues):
pass
class EventCategory(Enum):
insight = 'insight'
class ReadWriteType(Enum):
ReadOnly = 'ReadOnly'
WriteOnly = 'WriteOnly'
All = 'All'
class ExcludeManagementEventSources(DataResourceValues):
pass
class EventSelector(BaseModel):
"""
<p>Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.</p> <p>You can configure up to five event selectors for a trail.</p> <p>You cannot apply both event selectors and advanced event selectors to a trail.</p>
"""
ReadWriteType: Optional[ReadWriteType] = None
IncludeManagementEvents: Optional[Boolean] = None
DataResources: Optional[DataResources] = None
ExcludeManagementEventSources: Optional[ExcludeManagementEventSources] = None
class EventSelectors(BaseModel):
__root__: List[EventSelector]
class Trail(BaseModel):
    """
    The settings for a trail.
    """
    # All fields optional: the service may omit any of them in a response.
    Name: Optional[String] = None
    S3BucketName: Optional[String] = None
    S3KeyPrefix: Optional[String] = None
    SnsTopicName: Optional[String] = None
    SnsTopicARN: Optional[String] = None
    IncludeGlobalServiceEvents: Optional[Boolean] = None
    IsMultiRegionTrail: Optional[Boolean] = None
    HomeRegion: Optional[String] = None
    TrailARN: Optional[String] = None
    LogFileValidationEnabled: Optional[Boolean] = None
    CloudWatchLogsLogGroupArn: Optional[String] = None
    CloudWatchLogsRoleArn: Optional[String] = None
    KmsKeyId: Optional[String] = None
    HasCustomEventSelectors: Optional[Boolean] = None
    HasInsightSelectors: Optional[Boolean] = None
    IsOrganizationTrail: Optional[Boolean] = None


class InsightType(Enum):
    # Single insight type modeled here.
    ApiCallRateInsight = 'ApiCallRateInsight'


class InsightSelector(BaseModel):
    """
    A JSON string that contains a list of insight types that are logged on a trail.
    """
    InsightType: Optional[InsightType] = None


class ResourceIdList(DataResourceValues):
    # List-of-strings shape, reused for resource IDs.
    pass


class LookupAttributeKey(Enum):
    # Attribute names accepted by LookupEvents filters.
    EventId = 'EventId'
    EventName = 'EventName'
    ReadOnly = 'ReadOnly'
    Username = 'Username'
    ResourceType = 'ResourceType'
    ResourceName = 'ResourceName'
    EventSource = 'EventSource'
    AccessKeyId = 'AccessKeyId'


class LookupAttribute(BaseModel):
    """
    Specifies an attribute and value that filter the events returned.
    """
    # Both fields are required (no Optional / no default).
    AttributeKey: LookupAttributeKey
    AttributeValue: String


class LookupAttributesList(BaseModel):
    __root__: List[LookupAttribute]
class MaxResults(BaseModel):
    # Page size for paginated calls, constrained to 1..50.
    # NOTE(review): float bounds (1.0/50.0) on an int field work in pydantic
    # but are presumably generator artifacts.
    __root__: Annotated[int, Field(ge=1.0, le=50.0)]


class NextToken(String):
    # Opaque pagination token.
    pass


class OperatorValue(BaseModel):
    # Operand for an advanced-selector condition: 1..2048 characters.
    __root__: Annotated[str, Field(max_length=2048, min_length=1, regex='.+')]


class PublicKey(BaseModel):
    """
    Contains information about a returned public key.
    """
    Value: Optional[ByteBuffer] = None
    ValidityStartTime: Optional[Date] = None
    ValidityEndTime: Optional[Date] = None
    Fingerprint: Optional[String] = None


class Resource(BaseModel):
    """
    Specifies the type and name of a resource referenced by an event.
    """
    ResourceType: Optional[String] = None
    ResourceName: Optional[String] = None


class Tag(BaseModel):
    """
    A custom key-value pair associated with a resource such as a CloudTrail trail.
    """
    # Key is required; Value may be omitted.
    Key: String
    Value: Optional[String] = None


class TrailInfo(BaseModel):
    """
    Information about a CloudTrail trail, including the trail's name, home region, and Amazon Resource Name (ARN).
    """
    TrailARN: Optional[String] = None
    Name: Optional[String] = None
    HomeRegion: Optional[String] = None


class CreateTrailResponse(BaseModel):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    Name: Optional[String] = None
    S3BucketName: Optional[String] = None
    S3KeyPrefix: Optional[String] = None
    SnsTopicName: Optional[String] = None
    SnsTopicARN: Optional[String] = None
    IncludeGlobalServiceEvents: Optional[Boolean] = None
    IsMultiRegionTrail: Optional[Boolean] = None
    TrailARN: Optional[String] = None
    LogFileValidationEnabled: Optional[Boolean] = None
    CloudWatchLogsLogGroupArn: Optional[String] = None
    CloudWatchLogsRoleArn: Optional[String] = None
    KmsKeyId: Optional[String] = None
    IsOrganizationTrail: Optional[Boolean] = None


class DeleteTrailRequest(BaseModel):
    """
    The request that specifies the name of a trail to delete.
    """
    Name: String
class DescribeTrailsRequest(BaseModel):
    """
    Returns information about the trail.
    """
    # These two fields use lowerCamelCase, matching the wire protocol.
    trailNameList: Optional[TrailNameList] = None
    includeShadowTrails: Optional[Boolean] = None


class GetEventSelectorsRequest(BaseModel):
    TrailName: String


class GetInsightSelectorsRequest(BaseModel):
    TrailName: String


class GetTrailResponse(BaseModel):
    Trail: Optional[Trail] = None


class GetTrailRequest(BaseModel):
    Name: String


class GetTrailStatusResponse(BaseModel):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    IsLogging: Optional[Boolean] = None
    LatestDeliveryError: Optional[String] = None
    LatestNotificationError: Optional[String] = None
    LatestDeliveryTime: Optional[Date] = None
    LatestNotificationTime: Optional[Date] = None
    StartLoggingTime: Optional[Date] = None
    StopLoggingTime: Optional[Date] = None
    LatestCloudWatchLogsDeliveryError: Optional[String] = None
    LatestCloudWatchLogsDeliveryTime: Optional[Date] = None
    LatestDigestDeliveryTime: Optional[Date] = None
    LatestDigestDeliveryError: Optional[String] = None
    # The remaining "time"/"succeeded" fields are strings, not Dates,
    # as declared by the service model.
    LatestDeliveryAttemptTime: Optional[String] = None
    LatestNotificationAttemptTime: Optional[String] = None
    LatestNotificationAttemptSucceeded: Optional[String] = None
    LatestDeliveryAttemptSucceeded: Optional[String] = None
    TimeLoggingStarted: Optional[String] = None
    TimeLoggingStopped: Optional[String] = None


class GetTrailStatusRequest(BaseModel):
    """
    The name of a trail about which you want the current status.
    """
    Name: String


class ListPublicKeysRequest(BaseModel):
    """
    Requests the public keys for a specified time range.
    """
    StartTime: Optional[Date] = None
    EndTime: Optional[Date] = None
    NextToken: Optional[String] = None


class ListTagsRequest(BaseModel):
    """
    Specifies a list of trail tags to return.
    """
    # ResourceIdList is required.
    ResourceIdList: ResourceIdList
    NextToken: Optional[String] = None


class ListTrailsRequest(BaseModel):
    NextToken: Optional[String] = None


class LookupEventsRequest(BaseModel):
    """
    Contains a request for LookupEvents.
    """
    LookupAttributes: Optional[LookupAttributesList] = None
    StartTime: Optional[Date] = None
    EndTime: Optional[Date] = None
    EventCategory: Optional[EventCategory] = None
    MaxResults: Optional[MaxResults] = None
    NextToken: Optional[NextToken] = None


class StartLoggingRequest(BaseModel):
    """
    The request to CloudTrail to start logging Amazon Web Services API calls for an account.
    """
    Name: String


class StopLoggingRequest(BaseModel):
    """
    Passes the request to CloudTrail to stop logging Amazon Web Services API calls for the specified account.
    """
    Name: String
class UpdateTrailResponse(CreateTrailResponse):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    # Identical field set to CreateTrailResponse.
    pass


class UpdateTrailRequest(BaseModel):
    """
    Specifies settings to update for the trail.
    """
    # Name is required; all other settings are optional updates.
    Name: String
    S3BucketName: Optional[String] = None
    S3KeyPrefix: Optional[String] = None
    SnsTopicName: Optional[String] = None
    IncludeGlobalServiceEvents: Optional[Boolean] = None
    IsMultiRegionTrail: Optional[Boolean] = None
    EnableLogFileValidation: Optional[Boolean] = None
    CloudWatchLogsLogGroupArn: Optional[String] = None
    CloudWatchLogsRoleArn: Optional[String] = None
    KmsKeyId: Optional[String] = None
    IsOrganizationTrail: Optional[Boolean] = None


class TagsList(BaseModel):
    """
    A list of tags.
    """
    __root__: Annotated[List[Tag], Field(description='A list of tags.')]


class Operator(BaseModel):
    # Operand list for an advanced selector condition; must be non-empty.
    __root__: Annotated[List[OperatorValue], Field(min_items=1)]


class AdvancedFieldSelector(BaseModel):
    """
    A single selector statement in an advanced event selector.
    """
    # Field is required; the comparison applied is determined by which of the
    # optional operator members below is populated.
    Field: SelectorField
    Equals: Optional[Operator] = None
    StartsWith: Optional[Operator] = None
    EndsWith: Optional[Operator] = None
    NotEquals: Optional[Operator] = None
    NotStartsWith: Optional[Operator] = None
    NotEndsWith: Optional[Operator] = None


class TrailList(BaseModel):
    __root__: List[Trail]


class ResourceList(BaseModel):
    """
    A list of resources referenced by the event returned.
    """
    __root__: Annotated[
        List[Resource],
        Field(description='A list of resources referenced by the event returned.'),
    ]
class Event(BaseModel):
    """
    Contains information about an event that was returned by a lookup request. The result includes a representation of a CloudTrail event.
    """
    EventId: Optional[String] = None
    EventName: Optional[String] = None
    ReadOnly: Optional[String] = None
    AccessKeyId: Optional[String] = None
    EventTime: Optional[Date] = None
    EventSource: Optional[String] = None
    Username: Optional[String] = None
    Resources: Optional[ResourceList] = None
    # Raw event payload, delivered as a string per the service model.
    CloudTrailEvent: Optional[String] = None


class EventsList(BaseModel):
    __root__: List[Event]


class InsightSelectors(BaseModel):
    __root__: List[InsightSelector]


class PublicKeyList(BaseModel):
    __root__: List[PublicKey]


class Trails(BaseModel):
    __root__: List[TrailInfo]


class ResourceTag(BaseModel):
    """
    A resource tag.
    """
    ResourceId: Optional[String] = None
    TagsList: Optional[TagsList] = None


class AddTagsRequest(BaseModel):
    """
    Specifies the tags to add to a trail.
    """
    ResourceId: String
    TagsList: Optional[TagsList] = None


class CreateTrailRequest(BaseModel):
    """
    Specifies the settings for each trail.
    """
    # Name and S3BucketName are required; everything else is optional.
    Name: String
    S3BucketName: String
    S3KeyPrefix: Optional[String] = None
    SnsTopicName: Optional[String] = None
    IncludeGlobalServiceEvents: Optional[Boolean] = None
    IsMultiRegionTrail: Optional[Boolean] = None
    EnableLogFileValidation: Optional[Boolean] = None
    CloudWatchLogsLogGroupArn: Optional[String] = None
    CloudWatchLogsRoleArn: Optional[String] = None
    KmsKeyId: Optional[String] = None
    IsOrganizationTrail: Optional[Boolean] = None
    TagsList: Optional[TagsList] = None
class DescribeTrailsResponse(BaseModel):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    # lowerCamelCase matches the wire protocol.
    trailList: Optional[TrailList] = None


class GetInsightSelectorsResponse(BaseModel):
    TrailARN: Optional[String] = None
    InsightSelectors: Optional[InsightSelectors] = None


class ListPublicKeysResponse(BaseModel):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    PublicKeyList: Optional[PublicKeyList] = None
    NextToken: Optional[String] = None


class ListTrailsResponse(BaseModel):
    Trails: Optional[Trails] = None
    NextToken: Optional[String] = None


class LookupEventsResponse(BaseModel):
    """
    Contains a response to a LookupEvents action.
    """
    Events: Optional[EventsList] = None
    NextToken: Optional[NextToken] = None


class PutInsightSelectorsResponse(GetInsightSelectorsResponse):
    # Identical shape to GetInsightSelectorsResponse.
    pass


class PutInsightSelectorsRequest(BaseModel):
    # Both fields are required.
    TrailName: String
    InsightSelectors: InsightSelectors


class RemoveTagsRequest(BaseModel):
    """
    Specifies the tags to remove from a trail.
    """
    ResourceId: String
    TagsList: Optional[TagsList] = None


class AdvancedFieldSelectors(BaseModel):
    # Must contain at least one field selector.
    __root__: Annotated[List[AdvancedFieldSelector], Field(min_items=1)]


class AdvancedEventSelector(BaseModel):
    """
    An advanced event selector: an optionally named, non-empty set of field
    selectors over CloudTrail event record fields (readOnly, eventSource,
    eventName, eventCategory, resources.type, resources.ARN).  Per the AWS
    docs, advanced event selectors cannot be combined with (basic) event
    selectors on the same trail.
    """
    Name: Optional[SelectorName] = None
    FieldSelectors: AdvancedFieldSelectors


class AdvancedEventSelectors(BaseModel):
    __root__: List[AdvancedEventSelector]


class ResourceTagList(BaseModel):
    __root__: List[ResourceTag]


class GetEventSelectorsResponse(BaseModel):
    TrailARN: Optional[String] = None
    EventSelectors: Optional[EventSelectors] = None
    AdvancedEventSelectors: Optional[AdvancedEventSelectors] = None


class ListTagsResponse(BaseModel):
    """
    Returns the objects or data listed below if successful. Otherwise, returns an error.
    """
    ResourceTagList: Optional[ResourceTagList] = None
    NextToken: Optional[String] = None


class PutEventSelectorsResponse(GetEventSelectorsResponse):
    pass


class PutEventSelectorsRequest(BaseModel):
    TrailName: String
    EventSelectors: Optional[EventSelectors] = None
    AdvancedEventSelectors: Optional[AdvancedEventSelectors] = None
| 28.046776
| 2,722
| 0.742258
| 2,297
| 22,185
| 7.12364
| 0.202438
| 0.054758
| 0.070403
| 0.016867
| 0.300495
| 0.270794
| 0.243048
| 0.23162
| 0.209741
| 0.197641
| 0
| 0.00415
| 0.174577
| 22,185
| 790
| 2,723
| 28.082278
| 0.889362
| 0.300518
| 0
| 0.378947
| 1
| 0
| 0.014133
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.147368
| 0.013158
| 0
| 0.839474
| 0.002632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
83cee8ff993ceac9f72a0e51f92b3bd3dd100a33
| 282
|
py
|
Python
|
boto3_exceptions/apigatewayv2.py
|
siteshen/boto3_exceptions
|
d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b
|
[
"MIT"
] | 2
|
2021-06-22T00:00:35.000Z
|
2021-07-15T03:25:52.000Z
|
boto3_exceptions/apigatewayv2.py
|
siteshen/boto3_exceptions
|
d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b
|
[
"MIT"
] | null | null | null |
boto3_exceptions/apigatewayv2.py
|
siteshen/boto3_exceptions
|
d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b
|
[
"MIT"
] | null | null | null |
import boto3

# NOTE(review): the client is created at import time, so merely importing this
# module requires boto3 to resolve region/credential configuration — confirm
# that is acceptable for every importer.
exceptions = boto3.client('apigatewayv2').exceptions

# Re-export the service's modeled error classes as flat module-level names so
# callers can write `from ... import NotFoundException`.
BadRequestException = exceptions.BadRequestException
ConflictException = exceptions.ConflictException
NotFoundException = exceptions.NotFoundException
TooManyRequestsException = exceptions.TooManyRequestsException
| 31.333333
| 62
| 0.879433
| 19
| 282
| 13.052632
| 0.473684
| 0.233871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011407
| 0.067376
| 282
| 8
| 63
| 35.25
| 0.931559
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
83dde1b2ac414779e93b696131ef6bdfbd262264
| 83
|
py
|
Python
|
skip/apps.py
|
LCOGT/skip
|
2524ba71c39876aae8a31fff3de55e6cb7aa1f83
|
[
"BSD-3-Clause"
] | null | null | null |
skip/apps.py
|
LCOGT/skip
|
2524ba71c39876aae8a31fff3de55e6cb7aa1f83
|
[
"BSD-3-Clause"
] | 4
|
2020-09-10T20:31:54.000Z
|
2022-02-27T18:40:23.000Z
|
skip/apps.py
|
scimma/skip
|
aa9437d8c4f7d5edbffaec20e6651339241bbb95
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class SkipConfig(AppConfig):
    """Django application configuration for the ``skip`` app."""
    name = 'skip'
| 13.833333
| 33
| 0.73494
| 10
| 83
| 6.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 83
| 5
| 34
| 16.6
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
83e36583921a70281f2d01b179a5d51d91b1ff22
| 222
|
py
|
Python
|
Seeder/source/translation.py
|
WebarchivCZ/Seeder
|
1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8
|
[
"MIT"
] | 8
|
2017-08-16T19:18:57.000Z
|
2022-01-24T10:08:19.000Z
|
Seeder/source/translation.py
|
WebarchivCZ/Seeder
|
1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8
|
[
"MIT"
] | 242
|
2017-02-03T19:15:52.000Z
|
2022-03-25T08:02:52.000Z
|
Seeder/source/translation.py
|
WebarchivCZ/Seeder
|
1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8
|
[
"MIT"
] | 2
|
2019-03-06T12:36:29.000Z
|
2019-07-08T12:52:20.000Z
|
from modeltranslation.translator import TranslationOptions, register
from . import models
@register(models.Category)
@register(models.SubCategory)
class NewsTranslationOptions(TranslationOptions):
    # Registers the 'name' field of Category and SubCategory for translation
    # with django-modeltranslation.
    # NOTE(review): the class name says "News" but it is registered for the
    # category models — likely a copy-paste leftover; renaming is flagged only,
    # not done, since it would touch the module's public surface.
    fields = ('name',)
| 24.666667
| 68
| 0.810811
| 20
| 222
| 9
| 0.65
| 0.155556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099099
| 222
| 8
| 69
| 27.75
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f7a450127ccc01eaa6cd2da1580d42b0c9a0f9ec
| 13,079
|
py
|
Python
|
sdk/python/pulumi_kong/target.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-02-23T10:05:20.000Z
|
2020-05-15T14:22:10.000Z
|
sdk/python/pulumi_kong/target.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2020-04-21T22:04:23.000Z
|
2022-03-31T15:29:53.000Z
|
sdk/python/pulumi_kong/target.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TargetArgs', 'Target']
@pulumi.input_type
class TargetArgs:
    """Input arguments for constructing a ``Target`` resource.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
    """
    def __init__(__self__, *,
                 target: pulumi.Input[str],
                 upstream_id: pulumi.Input[str],
                 weight: pulumi.Input[int],
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Target resource.
        :param pulumi.Input[str] target: is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        :param pulumi.Input[str] upstream_id: is the id of the upstream to apply this target to.
        :param pulumi.Input[int] weight: is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list set of strings associated with the Plugin for grouping and filtering
        """
        # Required properties are always recorded; the optional `tags` is set
        # only when provided, keeping "unset" distinct from an explicit value.
        pulumi.set(__self__, "target", target)
        pulumi.set(__self__, "upstream_id", upstream_id)
        pulumi.set(__self__, "weight", weight)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def target(self) -> pulumi.Input[str]:
        """
        is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: pulumi.Input[str]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter(name="upstreamId")
    def upstream_id(self) -> pulumi.Input[str]:
        """
        is the id of the upstream to apply this target to.
        """
        return pulumi.get(self, "upstream_id")

    @upstream_id.setter
    def upstream_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "upstream_id", value)

    @property
    @pulumi.getter
    def weight(self) -> pulumi.Input[int]:
        """
        is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        """
        return pulumi.get(self, "weight")

    @weight.setter
    def weight(self, value: pulumi.Input[int]):
        pulumi.set(self, "weight", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list set of strings associated with the Plugin for grouping and filtering
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _TargetState:
    """Input state for looking up / filtering existing ``Target`` resources.

    Unlike ``TargetArgs``, every property here is optional.
    NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
    """
    def __init__(__self__, *,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 upstream_id: Optional[pulumi.Input[str]] = None,
                 weight: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering Target resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list set of strings associated with the Plugin for grouping and filtering
        :param pulumi.Input[str] target: is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        :param pulumi.Input[str] upstream_id: is the id of the upstream to apply this target to.
        :param pulumi.Input[int] weight: is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        """
        # Record only the properties that were actually supplied.
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if upstream_id is not None:
            pulumi.set(__self__, "upstream_id", upstream_id)
        if weight is not None:
            pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list set of strings associated with the Plugin for grouping and filtering
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter(name="upstreamId")
    def upstream_id(self) -> Optional[pulumi.Input[str]]:
        """
        is the id of the upstream to apply this target to.
        """
        return pulumi.get(self, "upstream_id")

    @upstream_id.setter
    def upstream_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "upstream_id", value)

    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        """
        is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        """
        return pulumi.get(self, "weight")

    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
class Target(pulumi.CustomResource):
    """A Kong upstream target resource (``kong:index/target:Target``).

    NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 upstream_id: Optional[pulumi.Input[str]] = None,
                 weight: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        ## Example Usage

        ```python
        import pulumi
        import pulumi_kong as kong

        target = kong.Target("target",
            target="sample_target:80",
            upstream_id=kong_upstream["upstream"]["id"],
            weight=10)
        ```

        ## Import

        To import a target use a combination of the upstream id and the target id as follows

        ```sh
        $ pulumi import kong:index/target:Target <target_identifier> <upstream_id>/<target_id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list set of strings associated with the Plugin for grouping and filtering
        :param pulumi.Input[str] target: is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        :param pulumi.Input[str] upstream_id: is the id of the upstream to apply this target to.
        :param pulumi.Input[int] weight: is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TargetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Example Usage

        ```python
        import pulumi
        import pulumi_kong as kong

        target = kong.Target("target",
            target="sample_target:80",
            upstream_id=kong_upstream["upstream"]["id"],
            weight=10)
        ```

        ## Import

        To import a target use a combination of the upstream id and the target id as follows

        ```sh
        $ pulumi import kong:index/target:Target <target_identifier> <upstream_id>/<target_id>
        ```

        :param str resource_name: The name of the resource.
        :param TargetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed overloads above: accept either a
        # TargetArgs bundle or individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(TargetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 upstream_id: Optional[pulumi.Input[str]] = None,
                 weight: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        # Shared constructor body used by both overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking up an existing one
            # via opts.id): validate and bundle the input properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TargetArgs.__new__(TargetArgs)

            __props__.__dict__["tags"] = tags
            # target/upstream_id/weight are required unless the resource is
            # being adopted from an existing URN (opts.urn set).
            if target is None and not opts.urn:
                raise TypeError("Missing required property 'target'")
            __props__.__dict__["target"] = target
            if upstream_id is None and not opts.urn:
                raise TypeError("Missing required property 'upstream_id'")
            __props__.__dict__["upstream_id"] = upstream_id
            if weight is None and not opts.urn:
                raise TypeError("Missing required property 'weight'")
            __props__.__dict__["weight"] = weight
        super(Target, __self__).__init__(
            'kong:index/target:Target',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            target: Optional[pulumi.Input[str]] = None,
            upstream_id: Optional[pulumi.Input[str]] = None,
            weight: Optional[pulumi.Input[int]] = None) -> 'Target':
        """
        Get an existing Target resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list set of strings associated with the Plugin for grouping and filtering
        :param pulumi.Input[str] target: is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        :param pulumi.Input[str] upstream_id: is the id of the upstream to apply this target to.
        :param pulumi.Input[int] weight: is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _TargetState.__new__(_TargetState)

        __props__.__dict__["tags"] = tags
        __props__.__dict__["target"] = target
        __props__.__dict__["upstream_id"] = upstream_id
        __props__.__dict__["weight"] = weight
        return Target(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list set of strings associated with the Plugin for grouping and filtering
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def target(self) -> pulumi.Output[str]:
        """
        is the target address (IP or hostname) and port. If omitted the port defaults to 8000.
        """
        return pulumi.get(self, "target")

    @property
    @pulumi.getter(name="upstreamId")
    def upstream_id(self) -> pulumi.Output[str]:
        """
        is the id of the upstream to apply this target to.
        """
        return pulumi.get(self, "upstream_id")

    @property
    @pulumi.getter
    def weight(self) -> pulumi.Output[int]:
        """
        is the weight this target gets within the upstream load balancer (0-1000, defaults to 100).
        """
        return pulumi.get(self, "weight")
| 39.875
| 138
| 0.62413
| 1,602
| 13,079
| 4.916355
| 0.099875
| 0.096369
| 0.07288
| 0.041265
| 0.789106
| 0.75546
| 0.728923
| 0.698197
| 0.67293
| 0.664678
| 0
| 0.009777
| 0.272727
| 13,079
| 327
| 139
| 39.996942
| 0.81823
| 0.345822
| 0
| 0.602273
| 1
| 0
| 0.075197
| 0.003096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153409
| false
| 0.005682
| 0.028409
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f7a6d9972299b6cbf0fca18d10b5b792a996bf98
| 628
|
py
|
Python
|
sumolib/files/__init__.py
|
team-know-name/Traffic-Light
|
221cb4f5e475bddbbddef859f87f1b01467fa182
|
[
"MIT"
] | 4
|
2019-10-09T15:04:25.000Z
|
2021-05-19T05:01:22.000Z
|
sumolib/files/__init__.py
|
Sulekhiya/Traffic-Light
|
500b0c3f4a5f50b8e4476e8aa055fa9d258e8a45
|
[
"MIT"
] | null | null | null |
sumolib/files/__init__.py
|
Sulekhiya/Traffic-Light
|
500b0c3f4a5f50b8e4476e8aa055fa9d258e8a45
|
[
"MIT"
] | 4
|
2019-10-12T09:55:12.000Z
|
2021-08-21T03:17:07.000Z
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2012-2019 German Aerospace Center (DLR) and others.
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v2.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v20.html
# SPDX-License-Identifier: EPL-2.0
# @file __init__.py
# @author Daniel Krajzewicz
# @author Jakob Erdmann
# @author Michael Behrisch
# @date 2012-12-04
# @version $Id$
from __future__ import absolute_import
from . import additional, selection # noqa
| 33.052632
| 74
| 0.754777
| 90
| 628
| 5.166667
| 0.766667
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041509
| 0.156051
| 628
| 18
| 75
| 34.888889
| 0.835849
| 0.828025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f7be31bfcda052ff9f496a287a7daa53197b723d
| 1,078
|
py
|
Python
|
src/model/Student.py
|
bazilinskyy/agent-based-uni
|
a8a5086a9d012e6cd972cf58c7865463b5e6f9b3
|
[
"MIT"
] | null | null | null |
src/model/Student.py
|
bazilinskyy/agent-based-uni
|
a8a5086a9d012e6cd972cf58c7865463b5e6f9b3
|
[
"MIT"
] | null | null | null |
src/model/Student.py
|
bazilinskyy/agent-based-uni
|
a8a5086a9d012e6cd972cf58c7865463b5e6f9b3
|
[
"MIT"
] | null | null | null |
from Person import Person
# TODO make name private
class Student(Person):
    __doc__ = "Student"

    # Class-level defaults; __init__ rebinds most of these per instance.
    points = 0
    marks = 0
    semester = 0
    totalSemesters = 8
    totalMarks = 0
    # NOTE(review): mutable class-level containers would be shared across
    # instances, but __init__ rebinds per-instance copies; kept for compatibility.
    modules = []
    moduleEnrollments = {}
    facult = ""  # NOTE(review): likely a typo for 'faculty' (see __init__); kept for compatibility
    resultFromSimluation = True  # Result from simulation: True -> advance to next year; False -> expelled
    passedByCompFromSimulation = 0  # Counter of modules passed by compensation

    def __init__(self, studentID, name="Student X", gender="m", leavingCertificate=700):
        self.studentID = studentID
        Person.__init__(self, name, gender)
        # Per-instance containers shadow the shared class-level defaults above.
        self.modules = []
        self.moduleEnrollments = {}
        self.semester = 1
        self.leavingCertificate = leavingCertificate  # TODO: check Irish system
        self.faculty = ""

    def getModules(self):
        return self.modules

    def getCourse(self):
        # NOTE(review): self.course is never assigned in this class — presumably
        # set externally; raises AttributeError otherwise. Verify callers.
        return self.course

    # TODO
    def canTake(self, module):
        return True

    # TODO
    def hasTaken(self, module):
        return True

    def getSemester(self):
        return self.semester

    def getTotalSemesters(self):
        # BUG FIX: 'self' was missing from the parameter list, so calling this
        # on an instance raised TypeError and the body could not resolve self.
        return self.totalSemesters

    def getTotalMarks(self):
        # BUG FIX: 'self' was missing from the parameter list (same defect as above).
        return self.totalMarks
| 22
| 103
| 0.726345
| 126
| 1,078
| 6.119048
| 0.492063
| 0.064851
| 0.054475
| 0.051881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011325
| 0.180891
| 1,078
| 49
| 104
| 22
| 0.861835
| 0.167904
| 0
| 0.057143
| 0
| 0
| 0.019058
| 0
| 0
| 0
| 0
| 0.020408
| 0
| 1
| 0.228571
| false
| 0.028571
| 0.028571
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
f7c9b816a0b6a742250f2b56ec3b2022f4dcc2ee
| 188
|
py
|
Python
|
portfolio/portfolio/views.py
|
gabrielx52/personal_site
|
228099a727922fa0298afa3deacf0e2e55dcf958
|
[
"MIT"
] | null | null | null |
portfolio/portfolio/views.py
|
gabrielx52/personal_site
|
228099a727922fa0298afa3deacf0e2e55dcf958
|
[
"MIT"
] | null | null | null |
portfolio/portfolio/views.py
|
gabrielx52/personal_site
|
228099a727922fa0298afa3deacf0e2e55dcf958
|
[
"MIT"
] | null | null | null |
"""Portfolio site views."""
from django.views.generic import TemplateView
class HomeView(TemplateView):
    """Render the static home page from ``portfolio/home.html``."""
    # TemplateView only needs template_name; no extra context is supplied here.
    template_name = 'portfolio/home.html'
| 18.8
| 45
| 0.718085
| 22
| 188
| 6.090909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154255
| 188
| 9
| 46
| 20.888889
| 0.842767
| 0.260638
| 0
| 0
| 0
| 0
| 0.149606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f7d4d85d053aa0c9cd98eb94c420e98e210fc6fa
| 198
|
py
|
Python
|
pyadb/device/helper/base.py
|
HsOjo/OjoPyADB
|
4f5272b5a838a09a3a5d4653dea7e24b5103283d
|
[
"MIT"
] | 2
|
2021-07-07T02:07:00.000Z
|
2021-08-23T01:50:40.000Z
|
pyadb/device/helper/base.py
|
HsOjo/OjoPyADB
|
4f5272b5a838a09a3a5d4653dea7e24b5103283d
|
[
"MIT"
] | null | null | null |
pyadb/device/helper/base.py
|
HsOjo/OjoPyADB
|
4f5272b5a838a09a3a5d4653dea7e24b5103283d
|
[
"MIT"
] | null | null | null |
class BaseHelper:
    """Base class for device helpers: binds a device and filters call locals."""

    def __init__(self, device):
        # Keep a reference to the device this helper operates on.
        self.device = device

    def params(self, locals_: dict):
        """Return a copy of *locals_* with the ``'self'`` entry removed.

        Raises KeyError when ``'self'`` is absent (same as the original pop).
        """
        filtered = dict(locals_)
        del filtered['self']
        return filtered
| 22
| 36
| 0.59596
| 22
| 198
| 5.090909
| 0.545455
| 0.178571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29798
| 198
| 8
| 37
| 24.75
| 0.805755
| 0
| 0
| 0
| 0
| 0
| 0.020202
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
f7d8e40f916842fd7b3408e8431f99772a6424b6
| 114
|
py
|
Python
|
tests/forms.py
|
nanorepublica/django-donations
|
349aaf17029f3f9b4723fead3fa28dd85959f14e
|
[
"BSD-3-Clause"
] | 9
|
2015-10-13T11:41:20.000Z
|
2020-11-30T04:38:43.000Z
|
tests/forms.py
|
nanorepublica/django-donations
|
349aaf17029f3f9b4723fead3fa28dd85959f14e
|
[
"BSD-3-Clause"
] | 63
|
2015-10-22T17:41:27.000Z
|
2021-11-20T12:18:26.000Z
|
tests/forms.py
|
nanorepublica/django-donations
|
349aaf17029f3f9b4723fead3fa28dd85959f14e
|
[
"BSD-3-Clause"
] | 3
|
2017-08-29T02:44:12.000Z
|
2020-04-07T23:43:12.000Z
|
from donations.forms import DonationForm
class FixedDonationForm(DonationForm):
    """Test form offering a fixed set of donation amounts."""
    # Preset choices; units/currency come from DonationForm — TODO confirm.
    amounts = [1, 5, 10, 10000]
| 19
| 40
| 0.754386
| 13
| 114
| 6.615385
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 0.157895
| 114
| 5
| 41
| 22.8
| 0.802083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f7f696d3f5d0ff6a558cdf619193a9e7b3c96b99
| 97
|
py
|
Python
|
landingpage/apps.py
|
Emmastro/africanlibraries
|
6755dd5a7d3453c7ba6e63d49071f9f5af280f71
|
[
"Apache-2.0"
] | null | null | null |
landingpage/apps.py
|
Emmastro/africanlibraries
|
6755dd5a7d3453c7ba6e63d49071f9f5af280f71
|
[
"Apache-2.0"
] | null | null | null |
landingpage/apps.py
|
Emmastro/africanlibraries
|
6755dd5a7d3453c7ba6e63d49071f9f5af280f71
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class LandingpageConfig(AppConfig):
    """Django application configuration for the landing page app."""
    # NOTE(review): the package is 'landingpage' (lowercase); the capitalized
    # 'Landingpage' here may not match the dotted import path Django expects —
    # verify against INSTALLED_APPS before changing.
    name = 'Landingpage'
| 16.166667
| 35
| 0.773196
| 10
| 97
| 7.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154639
| 97
| 5
| 36
| 19.4
| 0.914634
| 0
| 0
| 0
| 0
| 0
| 0.113402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7915ecc432372e2e1a6ed84be33000fc8cbd30a2
| 122
|
py
|
Python
|
tests/test_markers.py
|
twotwo/python-pytest
|
39f18f4bce8a75c67d8872119e627a8d30268afe
|
[
"MIT"
] | null | null | null |
tests/test_markers.py
|
twotwo/python-pytest
|
39f18f4bce8a75c67d8872119e627a8d30268afe
|
[
"MIT"
] | null | null | null |
tests/test_markers.py
|
twotwo/python-pytest
|
39f18f4bce8a75c67d8872119e627a8d30268afe
|
[
"MIT"
] | null | null | null |
import pytest
def test_1():
    """Unmarked placeholder test; always collected."""
    ...
@pytest.mark.slow
def test_2():
    """Placeholder marked 'slow' — selectable/excludable via ``-m slow``."""
    ...
@pytest.mark.skip
def test_3():
    """Placeholder that is always skipped by the skip marker."""
    ...
| 7.625
| 17
| 0.54918
| 17
| 122
| 3.764706
| 0.588235
| 0.328125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032967
| 0.254098
| 122
| 15
| 18
| 8.133333
| 0.67033
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.111111
| 0
| 0.444444
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
792f1cbfb62b4631fb62853daeb23fd4dfaecd7f
| 1,520
|
py
|
Python
|
android-runner/ExperimentRunner/Plugins/Profiler.py
|
S2-group/mobilesoft-2020-caching-pwa-replication-package
|
83ad21ba4c7a6a430103caa6616296cbdcf17de3
|
[
"MIT"
] | null | null | null |
android-runner/ExperimentRunner/Plugins/Profiler.py
|
S2-group/mobilesoft-2020-caching-pwa-replication-package
|
83ad21ba4c7a6a430103caa6616296cbdcf17de3
|
[
"MIT"
] | null | null | null |
android-runner/ExperimentRunner/Plugins/Profiler.py
|
S2-group/mobilesoft-2020-caching-pwa-replication-package
|
83ad21ba4c7a6a430103caa6616296cbdcf17de3
|
[
"MIT"
] | 2
|
2020-10-26T17:04:29.000Z
|
2020-10-27T13:06:52.000Z
|
class Profiler(object):
    """Abstract base for profiler plugins.

    Every hook below raises NotImplementedError; concrete profilers override
    them. The typical lifecycle implied by the method set: load -> repeated
    start_profiling/stop_profiling -> collect_results -> unload, with
    set_output called before start_profiling and aggregation at the end.
    """
    def __init__(self, config, paths):
        # Base class keeps no state; subclasses consume config/paths as needed.
        pass
    def dependencies(self):
        """Returns list of needed app dependencies, like com.quicinc.trepn; [] if none"""
        raise NotImplementedError
    def load(self, device):
        """Load (and start) the profiler process on the device"""
        raise NotImplementedError
    def start_profiling(self, device, **kwargs):
        """Start the profiling process"""
        raise NotImplementedError
    def stop_profiling(self, device, **kwargs):
        """Stop the profiling process"""
        raise NotImplementedError
    def collect_results(self, device):
        """Collect the data and clean up extra files on the device, save data in location set by 'set_output' """
        raise NotImplementedError
    def unload(self, device):
        """Stop the profiler, removing configuration files on device"""
        raise NotImplementedError
    def set_output(self, output_dir):
        """Set the output directory before the start_profiling is called"""
        raise NotImplementedError
    def aggregate_subject(self):
        """Aggregate the data at the end of a subject, collect data and save data to location set by 'set output' """
        raise NotImplementedError
    def aggregate_end(self, data_dir, output_file):
        """Aggregate the data at the end of the experiment.
        Data located in file structure inside data_dir. Save aggregated data to output_file
        """
        raise NotImplementedError
| 35.348837
| 117
| 0.672368
| 182
| 1,520
| 5.521978
| 0.362637
| 0.214925
| 0.214925
| 0.065672
| 0.240796
| 0.240796
| 0.149254
| 0.097512
| 0
| 0
| 0
| 0
| 0.248684
| 1,520
| 42
| 118
| 36.190476
| 0.880035
| 0.419737
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.47619
| false
| 0.047619
| 0
| 0
| 0.52381
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
f71eb1bcceacb04d1e517066e97c304ca359d409
| 444
|
py
|
Python
|
Set0/p0_3.py
|
izzy-el/mitbrazil-intro-python
|
193d552832393d193eb24d6881be0ab2a37b41d1
|
[
"MIT"
] | null | null | null |
Set0/p0_3.py
|
izzy-el/mitbrazil-intro-python
|
193d552832393d193eb24d6881be0ab2a37b41d1
|
[
"MIT"
] | null | null | null |
Set0/p0_3.py
|
izzy-el/mitbrazil-intro-python
|
193d552832393d193eb24d6881be0ab2a37b41d1
|
[
"MIT"
] | null | null | null |
def compute_bill(kwh_used):
    """Return the electricity bill for *kwh_used* kWh including a 20% surcharge.

    Tier rates: first 500 kWh at 0.45, next 1000 kWh at 0.74, next 1000 kWh
    at 1.25, and anything above 2500 kWh at 2.0.

    BUG FIX: the original charged the full first tier (500 * 0.45) even when
    usage was below 500 kWh, and its upper branches re-billed entire lower
    ranges (e.g. (kwh - 500) * 0.74 instead of 1000 * 0.74 once usage exceeds
    1500), double-counting the overlap. Output for the default 1000 kWh input
    is unchanged.
    """
    out = min(kwh_used, 500) * 0.45
    if kwh_used > 500:
        out += (min(kwh_used, 1500) - 500) * 0.74
    if kwh_used > 1500:
        out += (min(kwh_used, 2500) - 1500) * 1.25
    if kwh_used > 2500:
        out += (kwh_used - 2500) * 2
    # 20% surcharge on the subtotal, as in the original script.
    out += out * 0.2
    return out


kwh_used = 1000
print(compute_bill(kwh_used))
| 31.714286
| 104
| 0.547297
| 80
| 444
| 2.875
| 0.2125
| 0.395652
| 0.217391
| 0.156522
| 0.504348
| 0.504348
| 0.504348
| 0.504348
| 0.504348
| 0.408696
| 0
| 0.268657
| 0.245496
| 444
| 14
| 105
| 31.714286
| 0.41791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f7604308729a4a8b0860b28ad794e8617ed6dbd1
| 10,186
|
py
|
Python
|
corona cases forecasting/main.py
|
ShubhamGupta577/Amazing-Python-Scripts
|
deeb542a77b96fdcfbe21440eee4c620fa06daa9
|
[
"MIT"
] | null | null | null |
corona cases forecasting/main.py
|
ShubhamGupta577/Amazing-Python-Scripts
|
deeb542a77b96fdcfbe21440eee4c620fa06daa9
|
[
"MIT"
] | null | null | null |
corona cases forecasting/main.py
|
ShubhamGupta577/Amazing-Python-Scripts
|
deeb542a77b96fdcfbe21440eee4c620fa06daa9
|
[
"MIT"
] | null | null | null |
# importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
import datetime
from datetime import date
import warnings
warnings.filterwarnings('ignore')
plt.style.use('fivethirtyeight')
from pmdarima import auto_arima
# NOTE(review): these four reads download JHU CSSE data over the network at
# import time; the script fails offline or if the repository layout changes.
confirmed_cases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
deaths_reported = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
recovered_cases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
latest_data = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/07-15-2020.csv')
## attributes
# Fetching all the columns from confirmed dataset
cols = confirmed_cases.keys()
# Extracting the date columns (columns 0-3 are region metadata)
confirmed = confirmed_cases.loc[:, cols[4]:cols[-1]]
deaths = deaths_reported.loc[:, cols[4]:cols[-1]]
recoveries = recovered_cases.loc[:, cols[4]:cols[-1]]
# Range of date
dates = confirmed.keys()
# Summary accumulators — declared here but never filled in this script.
world_cases = []
total_deaths = []
mortality_rate = []
recovery_rate = []
total_recovered = []
total_active = []
# Confirmed
india_cases = []
# Death
india_deaths = []
# Recovered
india_recoveries = []
# Fill the India series: one cumulative total per date column.
for i in dates:
    india_cases.append(confirmed_cases[confirmed_cases['Country/Region'] == 'India'][i].sum())
    india_deaths.append(deaths_reported[deaths_reported['Country/Region'] == 'India'][i].sum())
    india_recoveries.append(recovered_cases[recovered_cases['Country/Region'] == 'India'][i].sum())
def daily_increase(data):
    """Return the day-over-day change series; element 0 is data[0] itself."""
    if len(data) == 0:
        return []
    deltas = [data[0]]
    for idx in range(1, len(data)):
        deltas.append(data[idx] - data[idx - 1])
    return deltas
def _arima_forecast(series, order, days=21):
    """Fit an ARIMA(*order*) model on *series* and forecast *days* days ahead.

    Returns (predictions_df, ci): a DataFrame of rounded forecasts indexed by
    calendar date starting tomorrow, and the confidence intervals.
    """
    train = [x for x in series]
    model = ARIMA(train, order=order)
    model_fit = model.fit(disp=0)
    forecast, err, ci = model_fit.forecast(steps=days, alpha=0.05)
    start_day = date.today() + datetime.timedelta(days=1)
    predictions_df = pd.DataFrame({'Forecast': forecast.round()},
                                  index=pd.date_range(start=start_day, periods=days, freq='D'))
    return predictions_df, ci


def _forecast_daily(raw_series, value_column, arima_order, title, plot_path):
    """Shared pipeline for the three *_cases_daily entry points.

    CONSISTENCY FIX: the original file contained three near-identical copies
    of this logic (fresh/death/recovered), differing only in the input series,
    DataFrame column name, ARIMA order, plot title and output path. Behavior
    per entry point is unchanged.
    """
    # Cumulative totals -> daily increases.
    india_daily_increase = daily_increase(raw_series)
    # Dates pre-processing: integer day index and matching calendar labels
    # starting at the JHU dataset epoch 1/22/2020.
    future_forecast = np.array([i for i in range(len(dates))]).reshape(-1, 1)
    start_date = datetime.datetime.strptime('1/22/2020', '%m/%d/%Y')
    future_forecast_dates = []
    for i in range(len(future_forecast)):
        future_forecast_dates.append((start_date + datetime.timedelta(days=i)).strftime('%m/%d/%Y'))
    dataCovid = pd.DataFrame({'Dates': future_forecast_dates, value_column: india_daily_increase})
    # 70/30 split; auto_arima run kept for its trace output (results unused
    # for the plot below, matching the original).
    train = dataCovid[:int(0.7 * (len(dataCovid)))]
    valid = dataCovid[int(0.7 * (len(dataCovid))):]
    # preprocessing (arima takes a univariate series as input)
    train.drop('Dates', axis=1, inplace=True)
    valid.drop('Dates', axis=1, inplace=True)
    model = auto_arima(train, trace=True, error_action='ignore', suppress_warnings=True)
    model.fit(train)
    forecast = model.predict(n_periods=len(valid))
    forecast = pd.DataFrame(forecast, index=valid.index, columns=['Prediction'])
    # Fixed-order ARIMA drives the actual 21-day forecast that gets plotted.
    new_values = dataCovid[value_column].values
    predictions, _ci = _arima_forecast(new_values, arima_order)
    casesY = []
    datesX = []
    forecast_values = predictions.iloc[:, 0]
    for i in range(0, 21):
        casesY.append(forecast_values[i])
        datesX.append((date.today() + datetime.timedelta(days=i)).strftime('%m/%d/%Y'))
    # Plot results for the forecasted dates only (detailed)
    plt.plot(datesX, casesY, color='red')
    plt.title(title)
    plt.xticks(rotation=90)
    plt.savefig(plot_path, bbox_inches='tight')
    plt.autoscale()
    plt.show()


def fresh_cases_daily():
    """Forecast and plot daily new confirmed cases (ARIMA order (2, 1, 5))."""
    _forecast_daily(india_cases, 'Daily Increase', (2, 1, 5),
                    'New active Cases Forecast',
                    "./corona cases forecasting/Results/plot1.png")


def death_cases_daily():
    """Forecast and plot daily new deaths (ARIMA order (0, 1, 1))."""
    _forecast_daily(india_deaths, 'Daily Increase', (0, 1, 1),
                    'New death Cases Forecast',
                    "./corona cases forecasting/Results/plot2.png")


def recovered_cases_daily():
    """Forecast and plot daily new recoveries (ARIMA order (1, 1, 2))."""
    _forecast_daily(india_recoveries, 'Daily recoveries', (1, 1, 2),
                    'New recovered Cases Forecast',
                    "./corona cases forecasting/Results/plot3.png")
# Take the user's choice of which prediction to run (uppercase letters only).
choice=input("F for fresh cases,D for death cases,R for recovered cases prediction : ")
if choice=='F':
    fresh_cases_daily()
elif choice=='D':
    death_cases_daily()
elif choice=='R':
    recovered_cases_daily()
else:
    print("Enter a valid choice")
| 42.26556
| 184
| 0.677106
| 1,378
| 10,186
| 4.838897
| 0.147315
| 0.031494
| 0.012597
| 0.021446
| 0.7488
| 0.744751
| 0.70216
| 0.70216
| 0.696611
| 0.684613
| 0
| 0.021959
| 0.186334
| 10,186
| 240
| 185
| 42.441667
| 0.782577
| 0.093854
| 0
| 0.576087
| 0
| 0.021739
| 0.151742
| 0.009714
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038043
| false
| 0
| 0.043478
| 0
| 0.103261
| 0.005435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f79cd5c33da2b32678df41fe15b1c9306789a9fd
| 1,365
|
py
|
Python
|
matrix-methods/frame2d/Frame2D/NodeLoads.py
|
nholtz/structural-analysis
|
246d6358355bd9768e30075d1f6af282ceb995be
|
[
"CC0-1.0"
] | 3
|
2016-05-26T07:01:51.000Z
|
2019-05-31T23:48:11.000Z
|
matrix-methods/frame2d/Frame2D/NodeLoads.py
|
nholtz/structural-analysis
|
246d6358355bd9768e30075d1f6af282ceb995be
|
[
"CC0-1.0"
] | null | null | null |
matrix-methods/frame2d/Frame2D/NodeLoads.py
|
nholtz/structural-analysis
|
246d6358355bd9768e30075d1f6af282ceb995be
|
[
"CC0-1.0"
] | 1
|
2016-08-30T06:08:03.000Z
|
2016-08-30T06:08:03.000Z
|
## Compiled from NodeLoads.ipynb on Sun Dec 10 12:51:11 2017
## DO NOT EDIT THIS FILE. YOUR CHANGES WILL BE LOST!!
## In [1]:
import numpy as np
from salib import extend
## In [9]:
class NodeLoad(object):
    """A 2D node load: forces fx, fy and moment mz, held as a 3x1 np.matrix."""

    def __init__(self, fx=0., fy=0., mz=0.):
        # A scalar fx means the three components were given individually;
        # otherwise fx is an existing 3x1 force matrix to copy.
        if np.isscalar(fx):
            self.forces = np.matrix([fx, fy, mz], dtype=np.float64).T
        else:
            self.forces = fx.copy()

    def __mul__(self, scale):
        """Scale the load; multiplying by exactly 1.0 returns self unchanged."""
        return self if scale == 1.0 else self.__class__(self.forces * scale)

    __rmul__ = __mul__

    def __repr__(self):
        fx, fy, mz = list(np.array(self.forces.T)[0])
        return "{}({},{},{})".format(self.__class__.__name__, fx, fy, mz)

    def __getitem__(self, ix):
        """Component access: 0 -> fx, 1 -> fy, 2 -> mz."""
        return self.forces[ix, 0]
## In [11]:
def makeNodeLoad(data):
    """Build a NodeLoad from a mapping with optional 'FX', 'FY', 'MZ' keys (each defaulting to 0)."""
    fx = data.get('FX', 0)
    fy = data.get('FY', 0)
    mz = data.get('MZ', 0)
    return NodeLoad(fx, fy, mz)
## In [13]:
id(NodeLoad)  # notebook cell artifact (displayed the object id); no effect as a script
## In [17]:
# Named accessors for the three components of the 3x1 forces matrix,
# added onto the existing NodeLoad class via salib's @extend decorator.
@extend
class NodeLoad:
    @property
    def fx(self):
        """x-direction force component (forces[0, 0])."""
        return self.forces[0,0]
    @fx.setter
    def fx(self,v):
        self.forces[0,0] = v
    @property
    def fy(self):
        """y-direction force component (forces[1, 0])."""
        return self.forces[1,0]
    @fy.setter
    def fy(self,v):
        self.forces[1,0] = v
    @property
    def mz(self):
        """z-axis moment component (forces[2, 0])."""
        return self.forces[2,0]
    @mz.setter
    def mz(self,v):
        self.forces[2,0] = v
## In [ ]:
| 20.073529
| 95
| 0.536996
| 197
| 1,365
| 3.538071
| 0.365482
| 0.157819
| 0.080344
| 0.086083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046267
| 0.303297
| 1,365
| 67
| 96
| 20.373134
| 0.686646
| 0.116484
| 0
| 0.073171
| 1
| 0
| 0.015152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.268293
| false
| 0
| 0.04878
| 0.121951
| 0.585366
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
e3968a2d3978d495fe4fcaa53bef8db906c43be4
| 5,754
|
py
|
Python
|
migration/migrator/migrations/course/20200402034200_overall_comments.py
|
zeez2030/Submitty
|
7118944ff4adc6f15d76984eb10a1e862926d724
|
[
"BSD-3-Clause"
] | 411
|
2016-06-14T20:52:25.000Z
|
2022-03-31T21:20:25.000Z
|
migration/migrator/migrations/course/20200402034200_overall_comments.py
|
KaelanWillauer/Submitty
|
cf9b6ceda15ec0a661e2ca81ea7864790094c64a
|
[
"BSD-3-Clause"
] | 5,730
|
2016-05-23T21:04:32.000Z
|
2022-03-31T10:08:06.000Z
|
migration/migrator/migrations/course/20200402034200_overall_comments.py
|
KaelanWillauer/Submitty
|
cf9b6ceda15ec0a661e2ca81ea7864790094c64a
|
[
"BSD-3-Clause"
] | 423
|
2016-09-22T21:11:30.000Z
|
2022-03-29T18:55:28.000Z
|
"""Migration for a given Submitty course database."""
def up(config, database, semester, course):
    """
    Run up migration.

    Creates the gradeable_data_overall_comment table (primary key, foreign
    keys, uniqueness constraints and id sequence), then copies each legacy
    overall comment from gradeable_data into it, attributed to the instructor.

    :param config: Object holding configuration details about Submitty
    :type config: migrator.config.Config
    :param database: Object for interacting with given database for environment
    :type database: migrator.db.Database
    :param semester: Semester of the course being migrated
    :type semester: str
    :param course: Code of course being migrated
    :type course: str
    """
    # Create overall comment table; the CHECK enforces that each row targets
    # either a user or a team.
    database.execute(
        """
        CREATE TABLE IF NOT EXISTS gradeable_data_overall_comment (
            goc_id integer NOT NULL,
            g_id character varying(255) NOT NULL,
            goc_user_id character varying(255),
            goc_team_id character varying(255),
            goc_grader_id character varying(255) NOT NULL,
            goc_overall_comment character varying NOT NULL,
            CONSTRAINT goc_user_team_id_check CHECK (goc_user_id IS NOT NULL OR goc_team_id IS NOT NULL)
        );
        """
    )
    # Each constraint is dropped with IF EXISTS first so the migration can be
    # re-run without failing on an existing constraint.
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_pkey")
    database.execute(
        """
        ALTER TABLE ONLY gradeable_data_overall_comment
            ADD CONSTRAINT gradeable_data_overall_comment_pkey PRIMARY KEY (goc_id);
        """
    )
    # Foreign key to the gradeable the comment belongs to.
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_g_id_fkey")
    database.execute(
        """
        ALTER TABLE ONLY gradeable_data_overall_comment
            ADD CONSTRAINT gradeable_data_overall_comment_g_id_fkey FOREIGN KEY (g_id) REFERENCES gradeable(g_id) ON DELETE CASCADE;
        """
    )
    # Foreign key to the commented user (nullable; team gradeables use goc_team_id).
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_user_id_fkey")
    database.execute(
        """
        ALTER TABLE ONLY gradeable_data_overall_comment
            ADD CONSTRAINT gradeable_data_overall_comment_goc_user_id_fkey FOREIGN KEY (goc_user_id) REFERENCES users(user_id) ON DELETE CASCADE;
        """
    )
    # Foreign key to the commented team (nullable; user gradeables use goc_user_id).
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_team_id_fkey")
    database.execute(
        """
        ALTER TABLE ONLY gradeable_data_overall_comment
            ADD CONSTRAINT gradeable_data_overall_comment_goc_team_id_fkey FOREIGN KEY (goc_team_id) REFERENCES gradeable_teams(team_id) ON DELETE CASCADE;
        """
    )
    # Foreign key to the grader who wrote the comment.
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_goc_grader_id")
    database.execute(
        """
        ALTER TABLE ONLY gradeable_data_overall_comment
            ADD CONSTRAINT gradeable_data_overall_comment_goc_grader_id FOREIGN KEY (goc_grader_id) REFERENCES users(user_id) ON DELETE CASCADE;
        """
    )
    # At most one comment per (gradeable, user, grader) and per (gradeable, team, grader).
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_user_unique")
    database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_user_unique UNIQUE (g_id, goc_user_id, goc_grader_id);")
    database.execute("ALTER TABLE gradeable_data_overall_comment DROP CONSTRAINT IF EXISTS gradeable_data_overall_comment_team_unique")
    database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ADD CONSTRAINT gradeable_data_overall_comment_team_unique UNIQUE (g_id, goc_team_id, goc_grader_id);")
    # Auto-incrementing sequence backing goc_id.
    database.execute(
        """
        CREATE SEQUENCE IF NOT EXISTS gradeable_data_overall_comment_goc_id_seq
            START WITH 1
            INCREMENT BY 1
            NO MINVALUE
            NO MAXVALUE
            CACHE 1;
        """)
    database.execute("ALTER SEQUENCE gradeable_data_overall_comment_goc_id_seq OWNED BY gradeable_data_overall_comment.goc_id;")
    database.execute("ALTER TABLE ONLY gradeable_data_overall_comment ALTER COLUMN goc_id SET DEFAULT nextval('gradeable_data_overall_comment_goc_id_seq'::regclass);")
    # All old overall comments belong to the instructor (first user_group = 1 row).
    instructor_id = database.execute("SELECT user_id FROM users WHERE user_group = 1;").first()[0]
    rows = database.execute("""
        SELECT
            g_id,
            gd_user_id,
            gd_team_id,
            gd_overall_comment
        FROM
            gradeable_data;
        """
    )
    # Copy each legacy comment; ON CONFLICT DO NOTHING keeps re-runs from
    # violating the uniqueness constraints added above.
    for g_id, user_id, team_id, comment in rows:
        query = '''
        INSERT INTO gradeable_data_overall_comment
            (
                g_id,
                goc_user_id,
                goc_team_id,
                goc_grader_id,
                goc_overall_comment
            ) VALUES (
                :g_id, :user_id, :team_id, :grader_id, :comment
            )
        ON CONFLICT
            DO NOTHING;
        '''
        params = {
            'g_id': g_id,
            'user_id': user_id,
            'team_id': team_id,
            'grader_id': instructor_id,
            'comment': comment
        }
        database.session.execute(query, params)
def down(config, database, semester, course):
    """
    Run down migration (rollback).

    No rollback work is performed: the overall-comment table and any copied
    rows are left in place.

    :param config: Object holding configuration details about Submitty
    :type config: migrator.config.Config
    :param database: Object for interacting with given database for environment
    :type database: migrator.db.Database
    :param semester: Semester of the course being migrated
    :type semester: str
    :param course: Code of course being migrated
    :type course: str
    """
    pass
| 36.884615
| 172
| 0.684741
| 703
| 5,754
| 5.277383
| 0.162162
| 0.14717
| 0.188679
| 0.254717
| 0.769003
| 0.716981
| 0.675202
| 0.618598
| 0.611051
| 0.587871
| 0
| 0.003972
| 0.25617
| 5,754
| 155
| 173
| 37.122581
| 0.86285
| 0.154327
| 0
| 0.145161
| 0
| 0
| 0.655434
| 0.281797
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0.016129
| 0
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e397058a6322fac3c23580c5e816a7821e8587de
| 436
|
py
|
Python
|
tmux_super_fingers/actions/os_open_action.py
|
camgraff/tmux_super_fingers
|
10692cc45cf884c29a3ddf4d8a0ffffc5709db34
|
[
"MIT"
] | 41
|
2021-08-23T19:30:51.000Z
|
2022-03-09T15:40:23.000Z
|
tmux_super_fingers/actions/os_open_action.py
|
camgraff/tmux_super_fingers
|
10692cc45cf884c29a3ddf4d8a0ffffc5709db34
|
[
"MIT"
] | 4
|
2021-09-21T19:49:35.000Z
|
2022-03-11T09:37:18.000Z
|
tmux_super_fingers/actions/os_open_action.py
|
camgraff/tmux_super_fingers
|
10692cc45cf884c29a3ddf4d8a0ffffc5709db34
|
[
"MIT"
] | 1
|
2022-03-24T23:15:14.000Z
|
2022-03-24T23:15:14.000Z
|
from .action import Action
from ..targets.target_payload import OsOpenable
from ..cli_adapter import RealCliAdapter, CliAdapter
class OsOpenAction(Action):
    """Action that opens the target's file or URL through the CLI adapter."""

    def __init__(self, target_payload: OsOpenable, cli_adapter: CliAdapter = RealCliAdapter()):
        # NOTE(review): the default RealCliAdapter() is created once at class
        # definition time and shared by all instances — confirm it is stateless.
        self.target_payload = target_payload
        self.cli_adapter = cli_adapter

    def perform(self) -> None:
        """Delegate to the adapter's os_open with the payload's file_or_url."""
        self.cli_adapter.os_open(self.target_payload.file_or_url)
| 33.538462
| 95
| 0.761468
| 54
| 436
| 5.833333
| 0.425926
| 0.206349
| 0.161905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158257
| 436
| 12
| 96
| 36.333333
| 0.858311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
e3a58c43d54b55106eb4699912320e8c5ccbf232
| 60
|
py
|
Python
|
trivial_tools/config_handling/__init__.py
|
IgorZyktin/trivial_tools
|
a1256e62d9345b9623850e37bd63df7ce52b81c8
|
[
"MIT"
] | null | null | null |
trivial_tools/config_handling/__init__.py
|
IgorZyktin/trivial_tools
|
a1256e62d9345b9623850e37bd63df7ce52b81c8
|
[
"MIT"
] | null | null | null |
trivial_tools/config_handling/__init__.py
|
IgorZyktin/trivial_tools
|
a1256e62d9345b9623850e37bd63df7ce52b81c8
|
[
"MIT"
] | null | null | null |
from trivial_tools.config_handling.abstract_config import *
| 30
| 59
| 0.883333
| 8
| 60
| 6.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 60
| 1
| 60
| 60
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
e3ce93bb54ead5f98553a08e9cd74a6fa8d0d0a5
| 169
|
py
|
Python
|
fn_reflection/urllib.py
|
fn-reflection/fn_reflection
|
1e2c1c812dd9d119f3e2b533a8bc5988a1f656d3
|
[
"Apache-2.0"
] | null | null | null |
fn_reflection/urllib.py
|
fn-reflection/fn_reflection
|
1e2c1c812dd9d119f3e2b533a8bc5988a1f656d3
|
[
"Apache-2.0"
] | null | null | null |
fn_reflection/urllib.py
|
fn-reflection/fn_reflection
|
1e2c1c812dd9d119f3e2b533a8bc5988a1f656d3
|
[
"Apache-2.0"
] | null | null | null |
from urllib.parse import urlparse
def get_second_level_domain(url: str):
    """Return the second-level label of *url*'s host (e.g. ``'example'`` for
    ``https://www.example.com``), or ``""`` when the host contains no dot."""
    labels = urlparse(url).netloc.split('.')
    if len(labels) > 1:
        return labels[-2]
    return ""
| 24.142857
| 48
| 0.698225
| 25
| 169
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 0.16568
| 169
| 6
| 49
| 28.166667
| 0.801418
| 0
| 0
| 0
| 0
| 0
| 0.005917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
e3e28eab1f27ffaa0183690dba77c24000925f4c
| 87
|
py
|
Python
|
homepage/tests/test_backend.py
|
jahan-addison/gridpaste
|
a4121fb7eddd3df7f30a2bbef0d5a53f7dd9d8c8
|
[
"MIT"
] | 4
|
2017-09-26T00:46:19.000Z
|
2022-03-01T06:27:24.000Z
|
homepage/tests/test_backend.py
|
bramz/gridpaste
|
0c41bd26c24cdda98e365acb690de418be59c13b
|
[
"MIT"
] | 54
|
2015-01-09T15:48:15.000Z
|
2019-12-21T22:13:14.000Z
|
homepage/tests/test_backend.py
|
bramz/gridpaste
|
0c41bd26c24cdda98e365acb690de418be59c13b
|
[
"MIT"
] | 3
|
2017-09-26T00:46:20.000Z
|
2019-03-19T22:42:49.000Z
|
from backend import __version__
def test_version():
    """Smoke test: the backend package reports the expected release version."""
    # Fix: the original string literal was unterminated ('2.0.0 without a
    # closing quote), which is a SyntaxError.
    assert __version__ == '2.0.0'
| 14.5
| 32
| 0.724138
| 12
| 87
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042254
| 0.183908
| 87
| 5
| 33
| 17.4
| 0.71831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| null | null | 0
| 0.333333
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5400442d3d26d6c676eb637bd8a33189ec91b3e0
| 191
|
py
|
Python
|
funtions/lambda-map01.py
|
LBarros77/Python
|
283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af
|
[
"MIT"
] | null | null | null |
funtions/lambda-map01.py
|
LBarros77/Python
|
283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af
|
[
"MIT"
] | null | null | null |
funtions/lambda-map01.py
|
LBarros77/Python
|
283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af
|
[
"MIT"
] | null | null | null |
# Same demo written with comprehensions/generators instead of map + lambda.
lst2 = [2 ** exponent for exponent in range(5)]
print(lst2)
for square in (value ** 2 for value in lst2):
    print(square, end=" ")
print()
print([1 if value % 2 == 0 else 0 for value in lst2])
| 19.1
| 56
| 0.570681
| 38
| 191
| 2.868421
| 0.447368
| 0.192661
| 0.357798
| 0.385321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073333
| 0.21466
| 191
| 9
| 57
| 21.222222
| 0.653333
| 0
| 0
| 0
| 0
| 0
| 0.005236
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
5423f9f471663312cef9d3f18da555d674078832
| 118
|
py
|
Python
|
config.py
|
mrlazeriim/LuckTheManager
|
218904ae7251aef3ac176d026214309d0176c881
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
mrlazeriim/LuckTheManager
|
218904ae7251aef3ac176d026214309d0176c881
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
mrlazeriim/LuckTheManager
|
218904ae7251aef3ac176d026214309d0176c881
|
[
"Apache-2.0"
] | null | null | null |
# group ids or account ids can be retrieved with @username_to_id_bot
# NOTE(review): this is a fill-in template — both placeholders must be replaced
# before running; as written, the unquoted <bot-owner-id> is not valid Python.
BOT_TOKEN="<bot-token>"
# BOT_OWNER is presumably a numeric account id (hence no quotes) — confirm.
BOT_OWNER=<bot-owner-id>
| 29.5
| 68
| 0.779661
| 22
| 118
| 3.954545
| 0.636364
| 0.183908
| 0.252874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110169
| 118
| 3
| 69
| 39.333333
| 0.828571
| 0.559322
| 0
| 0
| 0
| 0
| 0.22
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
542645c984b82c487f0ad78947fad0968ab8c3c9
| 202
|
py
|
Python
|
src/cpa/__init__.py
|
inigoalonso/cpa
|
9783d2e2fee07420692dda7c8ed71183f86b723c
|
[
"MIT"
] | null | null | null |
src/cpa/__init__.py
|
inigoalonso/cpa
|
9783d2e2fee07420692dda7c8ed71183f86b723c
|
[
"MIT"
] | null | null | null |
src/cpa/__init__.py
|
inigoalonso/cpa
|
9783d2e2fee07420692dda7c8ed71183f86b723c
|
[
"MIT"
] | null | null | null |
"""
Change Propagation Assessment
-----------------------------
A library for performing Change Propagation Assessment (CPA)
"""
__version__ = '0.0'
__all__=["something"]
from .cpa import something
| 15.538462
| 60
| 0.638614
| 20
| 202
| 6.05
| 0.7
| 0.280992
| 0.446281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0.128713
| 202
| 12
| 61
| 16.833333
| 0.676136
| 0.59901
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
583af72a986515268e80730cb461bd94bbf04d48
| 3,510
|
py
|
Python
|
odo/odo.py
|
farukht/odo
|
9fce6690b3666160681833540de6c55e922de5eb
|
[
"BSD-3-Clause"
] | 844
|
2015-08-22T02:18:40.000Z
|
2022-03-31T06:30:34.000Z
|
odo/odo.py
|
farukht/odo
|
9fce6690b3666160681833540de6c55e922de5eb
|
[
"BSD-3-Clause"
] | 321
|
2015-08-20T14:33:26.000Z
|
2022-01-30T22:42:20.000Z
|
odo/odo.py
|
farukht/odo
|
9fce6690b3666160681833540de6c55e922de5eb
|
[
"BSD-3-Clause"
] | 140
|
2015-09-05T01:32:13.000Z
|
2022-02-03T14:00:30.000Z
|
from .into import into
def odo(source, target, **kwargs):
    """ Push one dataset into another

    Parameters
    ----------
    source: object or string
        The source of your data. Either an object (e.g. DataFrame),
        or a string ('filename.csv')
    target: object or string or type
        The target for where you want your data to go.
        Either an object, (e.g. []), a type, (e.g. list)
        or a string (e.g. 'postgresql://hostname::tablename')
    raise_on_errors: bool (optional, defaults to False)
        Raise exceptions rather than reroute around them
    **kwargs:
        keyword arguments to pass through to conversion functions.

    Optional Keyword Arguments
    --------------------------
    Odo passes keyword arguments (like ``sep=';'``) down to the functions
    that it uses to perform conversions (like ``pandas.read_csv``). Due to the
    quantity of possible optional keyword arguments we can not list them here.
    See the following documentation for your format

    * AWS - http://odo.pydata.org/en/latest/aws.html
    * CSV - http://odo.pydata.org/en/latest/csv.html
    * JSON - http://odo.pydata.org/en/latest/json.html
    * HDF5 - http://odo.pydata.org/en/latest/hdf5.html
    * HDFS - http://odo.pydata.org/en/latest/hdfs.html
    * Hive - http://odo.pydata.org/en/latest/hive.html
    * SAS - http://odo.pydata.org/en/latest/sas.html
    * SQL - http://odo.pydata.org/en/latest/sql.html
    * SSH - http://odo.pydata.org/en/latest/ssh.html
    * Mongo - http://odo.pydata.org/en/latest/mongo.html
    * Spark - http://odo.pydata.org/en/latest/spark.html

    Examples
    --------
    >>> L = odo((1, 2, 3), list)  # Convert things into new things
    >>> L
    [1, 2, 3]

    >>> _ = odo((4, 5, 6), L)  # Append things onto existing things
    >>> L
    [1, 2, 3, 4, 5, 6]

    >>> odo([('Alice', 1), ('Bob', 2)], 'myfile.csv')  # doctest: +SKIP

    Explanation
    -----------
    We can specify data with a Python object like a ``list``, ``DataFrame``,
    ``sqlalchemy.Table``, ``h5py.Dataset``, etc..

    We can specify data with a string URI like ``'myfile.csv'``,
    ``'myfiles.*.json'`` or ``'sqlite:///data.db::tablename'``. These are
    matched by regular expression. See the ``resource`` function for more
    details on string URIs.

    We can optionally specify datatypes with the ``dshape=`` keyword, providing
    a datashape. This allows us to be explicit about types when mismatches
    occur or when our data doesn't hold the whole picture. See the
    ``discover`` function for more information on ``dshape``.

    >>> ds = 'var * {name: string, balance: float64}'
    >>> odo([('Alice', 100), ('Bob', 200)], 'accounts.json', dshape=ds)  # doctest: +SKIP

    We can optionally specify keyword arguments to pass down to relevant
    conversion functions. For example, when converting a CSV file we might
    want to specify delimiter

    >>> odo('accounts.csv', list, has_header=True, delimiter=';')  # doctest: +SKIP

    These keyword arguments trickle down to whatever function ``into`` uses
    convert this particular format, functions like ``pandas.read_csv``.

    See Also
    --------
    odo.resource.resource - Specify things with strings
    datashape.discover - Get datashape of data
    odo.convert.convert - Convert things into new things
    odo.append.append - Add things onto existing things
    """
    # Thin public entry point: all dispatch logic lives in ``into``.
    return into(target, source, **kwargs)
| 38.152174
| 91
| 0.630769
| 481
| 3,510
| 4.590437
| 0.386694
| 0.034873
| 0.064764
| 0.07971
| 0.182971
| 0.138587
| 0
| 0
| 0
| 0
| 0
| 0.010328
| 0.227635
| 3,510
| 91
| 92
| 38.571429
| 0.804131
| 0.892023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
58a2eb12e46b821df21da1a39d43661bc2d8793d
| 118
|
py
|
Python
|
rl-ros-agents/rl_ros_agents/utils/utils.py
|
FranklinBF/arena2D
|
5dce3f0c41cce94691bbc9ca4f6ded124de61030
|
[
"MIT"
] | 18
|
2020-08-02T07:25:24.000Z
|
2022-01-06T08:53:00.000Z
|
rl-ros-agents/rl_ros_agents/utils/utils.py
|
FranklinBF/arena2D
|
5dce3f0c41cce94691bbc9ca4f6ded124de61030
|
[
"MIT"
] | 4
|
2020-09-28T20:42:00.000Z
|
2020-10-10T01:41:43.000Z
|
rl-ros-agents/rl_ros_agents/utils/utils.py
|
Sirupli/arena2D
|
2214754fe8e9358fa8065be5187d73104949dc4f
|
[
"MIT"
] | 18
|
2020-08-15T19:37:48.000Z
|
2022-03-21T17:58:39.000Z
|
from datetime import datetime
def getTimeStr():
time = datetime.now()
return time.strftime("%Y_%m_%d_%H_%M")
| 19.666667
| 42
| 0.686441
| 17
| 118
| 4.529412
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 5
| 43
| 23.6
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
58a6fcca26ef21da9037733eabae72a366932f10
| 10,577
|
py
|
Python
|
google/ads/google_ads/v5/proto/services/campaign_draft_service_pb2_grpc.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v5/proto/services/campaign_draft_service_pb2_grpc.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v5/proto/services/campaign_draft_service_pb2_grpc.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v5.proto.resources import campaign_draft_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_campaign__draft__pb2
from google.ads.google_ads.v5.proto.services import campaign_draft_service_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
class CampaignDraftServiceStub(object):
    """Proto file describing the Campaign Draft service.

    Service to manage campaign drafts.
    """

    # NOTE(review): gRPC-generated code — regenerate from the .proto source
    # instead of editing this class by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC, each bound to its fully-qualified
        # method path with the matching protobuf request serializer and
        # response deserializer.
        self.GetCampaignDraft = channel.unary_unary(
                '/google.ads.googleads.v5.services.CampaignDraftService/GetCampaignDraft',
                request_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.GetCampaignDraftRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_campaign__draft__pb2.CampaignDraft.FromString,
                )
        self.MutateCampaignDrafts = channel.unary_unary(
                '/google.ads.googleads.v5.services.CampaignDraftService/MutateCampaignDrafts',
                request_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.MutateCampaignDraftsRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.MutateCampaignDraftsResponse.FromString,
                )
        # PromoteCampaignDraft returns a google.longrunning Operation (LRO).
        self.PromoteCampaignDraft = channel.unary_unary(
                '/google.ads.googleads.v5.services.CampaignDraftService/PromoteCampaignDraft',
                request_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.PromoteCampaignDraftRequest.SerializeToString,
                response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
                )
        self.ListCampaignDraftAsyncErrors = channel.unary_unary(
                '/google.ads.googleads.v5.services.CampaignDraftService/ListCampaignDraftAsyncErrors',
                request_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.ListCampaignDraftAsyncErrorsRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.ListCampaignDraftAsyncErrorsResponse.FromString,
                )
class CampaignDraftServiceServicer(object):
    """Proto file describing the Campaign Draft service.

    Service to manage campaign drafts.
    """

    # NOTE(review): generated base class — subclass and override the methods
    # below; every default implementation answers grpc UNIMPLEMENTED.

    def GetCampaignDraft(self, request, context):
        """Returns the requested campaign draft in full detail.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def MutateCampaignDrafts(self, request, context):
        """Creates, updates, or removes campaign drafts. Operation statuses are
        returned.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def PromoteCampaignDraft(self, request, context):
        """Promotes the changes in a draft back to the base campaign.

        This method returns a Long Running Operation (LRO) indicating if the
        Promote is done. Use [Operations.GetOperation] to poll the LRO until it
        is done. Only a done status is returned in the response. See the status
        in the Campaign Draft resource to determine if the promotion was
        successful. If the LRO failed, use
        [CampaignDraftService.ListCampaignDraftAsyncErrors][google.ads.googleads.v5.services.CampaignDraftService.ListCampaignDraftAsyncErrors] to view the list of
        error reasons.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListCampaignDraftAsyncErrors(self, request, context):
        """Returns all errors that occurred during CampaignDraft promote. Throws an
        error if called before campaign draft is promoted.
        Supports standard list paging.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_CampaignDraftServiceServicer_to_server(servicer, server):
    # NOTE(review): gRPC-generated registration helper — do not edit by hand.
    # Maps each RPC name to a handler that deserializes the request, invokes
    # the servicer method, and serializes the response.
    rpc_method_handlers = {
            'GetCampaignDraft': grpc.unary_unary_rpc_method_handler(
                    servicer.GetCampaignDraft,
                    request_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.GetCampaignDraftRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_campaign__draft__pb2.CampaignDraft.SerializeToString,
            ),
            'MutateCampaignDrafts': grpc.unary_unary_rpc_method_handler(
                    servicer.MutateCampaignDrafts,
                    request_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.MutateCampaignDraftsRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.MutateCampaignDraftsResponse.SerializeToString,
            ),
            'PromoteCampaignDraft': grpc.unary_unary_rpc_method_handler(
                    servicer.PromoteCampaignDraft,
                    request_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.PromoteCampaignDraftRequest.FromString,
                    response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
            ),
            'ListCampaignDraftAsyncErrors': grpc.unary_unary_rpc_method_handler(
                    servicer.ListCampaignDraftAsyncErrors,
                    request_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.ListCampaignDraftAsyncErrorsRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.ListCampaignDraftAsyncErrorsResponse.SerializeToString,
            ),
    }
    # Register all handlers under the fully-qualified service name.
    generic_handler = grpc.method_handlers_generic_handler(
            'google.ads.googleads.v5.services.CampaignDraftService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class CampaignDraftService(object):
    """Proto file describing the Campaign Draft service.

    Service to manage campaign drafts.
    """

    # NOTE(review): generated convenience wrappers — each staticmethod makes a
    # single stub-less unary-unary call through grpc.experimental.

    @staticmethod
    def GetCampaignDraft(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v5.services.CampaignDraftService/GetCampaignDraft',
            google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.GetCampaignDraftRequest.SerializeToString,
            google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_campaign__draft__pb2.CampaignDraft.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def MutateCampaignDrafts(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v5.services.CampaignDraftService/MutateCampaignDrafts',
            google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.MutateCampaignDraftsRequest.SerializeToString,
            google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.MutateCampaignDraftsResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PromoteCampaignDraft(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Returns a google.longrunning Operation rather than a resource.
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v5.services.CampaignDraftService/PromoteCampaignDraft',
            google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.PromoteCampaignDraftRequest.SerializeToString,
            google_dot_longrunning_dot_operations__pb2.Operation.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListCampaignDraftAsyncErrors(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v5.services.CampaignDraftService/ListCampaignDraftAsyncErrors',
            google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.ListCampaignDraftAsyncErrorsRequest.SerializeToString,
            google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_campaign__draft__service__pb2.ListCampaignDraftAsyncErrorsResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
| 56.260638
| 185
| 0.747755
| 1,078
| 10,577
| 6.83859
| 0.148423
| 0.04924
| 0.037439
| 0.046799
| 0.759224
| 0.752713
| 0.741319
| 0.67363
| 0.660065
| 0.624797
| 0
| 0.007646
| 0.196275
| 10,577
| 187
| 186
| 56.561497
| 0.859546
| 0.125555
| 0
| 0.461538
| 1
| 0
| 0.102731
| 0.076192
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.030769
| 0.030769
| 0.161538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
54565c9ec438b60c98b3ac2a3f3a100b445e6023
| 132
|
py
|
Python
|
flask-backend/palletes.py
|
Jroc561/Model-Tester
|
839c6ccd50eddd34255c0993a33e23a8ec2b2783
|
[
"MIT"
] | null | null | null |
flask-backend/palletes.py
|
Jroc561/Model-Tester
|
839c6ccd50eddd34255c0993a33e23a8ec2b2783
|
[
"MIT"
] | null | null | null |
flask-backend/palletes.py
|
Jroc561/Model-Tester
|
839c6ccd50eddd34255c0993a33e23a8ec2b2783
|
[
"MIT"
] | null | null | null |
#Colors to be used in the plots
# NOTE(review): 'sns' is never imported in this file — 'import seaborn as sns'
# must be in scope (e.g. at file top) before this line runs, or it raises
# NameError.
color = ["#f94144","#f3722c","#f8961e","#f9c74f","#90be6d","#43aa8b","#577590"]
sns.palplot(color)  # draw the palette as a horizontal swatch strip
| 26.4
| 79
| 0.651515
| 18
| 132
| 4.777778
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 0.090909
| 132
| 4
| 80
| 33
| 0.483333
| 0.227273
| 0
| 0
| 0
| 0
| 0.49
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5485ea2017936e46ed432f4958e8115589e476e8
| 137
|
py
|
Python
|
distnet/utils/__init__.py
|
jeanollion/dlutils
|
ea419e79486e1212219dc06d39c3a4f4c305ff49
|
[
"Apache-2.0"
] | 4
|
2020-05-27T01:39:44.000Z
|
2021-09-03T18:20:33.000Z
|
distnet/utils/__init__.py
|
jeanollion/dlutils
|
ea419e79486e1212219dc06d39c3a4f4c305ff49
|
[
"Apache-2.0"
] | null | null | null |
distnet/utils/__init__.py
|
jeanollion/dlutils
|
ea419e79486e1212219dc06d39c3a4f4c305ff49
|
[
"Apache-2.0"
] | null | null | null |
name="utils"
from .callbacks import PatchedModelCheckpoint, PersistentReduceLROnPlateau
from .helpers import predict_average_flip_rotate
| 34.25
| 74
| 0.883212
| 14
| 137
| 8.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072993
| 137
| 3
| 75
| 45.666667
| 0.929134
| 0
| 0
| 0
| 0
| 0
| 0.036496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
54b59c790cad2726c876c8f4b7fe2a8b2f9e04e0
| 90
|
py
|
Python
|
cid/__init__.py
|
zetahernandez/django-cid
|
1a41d1739ba768cecc5fbc2eede80db9a9cc2898
|
[
"BSD-3-Clause"
] | 14
|
2019-04-24T15:15:08.000Z
|
2022-03-23T17:27:14.000Z
|
cid/__init__.py
|
zetahernandez/django-cid
|
1a41d1739ba768cecc5fbc2eede80db9a9cc2898
|
[
"BSD-3-Clause"
] | 16
|
2015-04-12T23:59:32.000Z
|
2018-06-06T19:33:10.000Z
|
cid/__init__.py
|
Polyconseil/cid
|
595f64a51a71bd4a1d47eefdb56002d72629d603
|
[
"BSD-3-Clause"
] | 8
|
2015-07-03T20:37:12.000Z
|
2018-06-06T19:19:04.000Z
|
# Expose the installed distribution's version as ``cid.__version__``.
try:
    # Preferred: stdlib importlib.metadata (Python >= 3.8) instead of the
    # deprecated, slow-to-import pkg_resources API.
    from importlib.metadata import version as _dist_version
    __version__ = _dist_version('django-cid')
except ImportError:
    # Fallback for interpreters without importlib.metadata.
    import pkg_resources
    __version__ = pkg_resources.get_distribution('django-cid').version
| 18
| 66
| 0.833333
| 11
| 90
| 6.181818
| 0.727273
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077778
| 90
| 4
| 67
| 22.5
| 0.819277
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
49c29b1fea6f52343479865363875c8da0d85a89
| 796
|
py
|
Python
|
vendor/Twisted-10.0.0/doc/web/examples/xmlrpcclient.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
vendor/Twisted-10.0.0/doc/web/examples/xmlrpcclient.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 1
|
2020-08-02T15:40:49.000Z
|
2020-08-02T15:40:49.000Z
|
vendor/Twisted-10.0.0/doc/web/examples/xmlrpcclient.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
from twisted.web.xmlrpc import Proxy
from twisted.internet import reactor
def printValue(value):
    # Success callback: display the XML-RPC result, then stop the event loop.
    # (Python 2 print-statement syntax; this Twisted 10 example predates py3.)
    print repr(value)
    reactor.stop()
def printError(error):
    # Errback: report the failure, then stop the event loop so the script
    # can proceed to the next example. (Python 2 print statement.)
    print 'error', error
    reactor.stop()
# Each section issues one XML-RPC call and runs the reactor until the
# callback/errback stops it.
# NOTE(review): re-running the reactor after reactor.stop() is unsupported in
# modern Twisted (reactors are not restartable); this sequential pattern only
# worked in the era this example was written for.
proxy = Proxy('http://advogato.org/XMLRPC')
proxy.callRemote('test.sumprod', 3, 5).addCallbacks(printValue, printError)
reactor.run()
proxy.callRemote('test.capitalize', 'moshe zadka').addCallbacks(printValue,
                                                                printError)
reactor.run()
# Query a public time server.
proxy = Proxy('http://time.xmlrpc.com/RPC2')
proxy.callRemote('currentTime.getCurrentTime').addCallbacks(printValue, printError)
reactor.run()
# Look up a US state name by number.
proxy = Proxy('http://betty.userland.com/RPC2')
proxy.callRemote('examples.getStateName', 41).addCallbacks(printValue, printError)
reactor.run()
| 33.166667
| 83
| 0.701005
| 88
| 796
| 6.340909
| 0.443182
| 0.107527
| 0.229391
| 0.27957
| 0.360215
| 0.284946
| 0.200717
| 0.200717
| 0
| 0
| 0
| 0.008982
| 0.160804
| 796
| 23
| 84
| 34.608696
| 0.826347
| 0
| 0
| 0.3
| 0
| 0
| 0.217337
| 0.059045
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.1
| null | null | 0.45
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
49d079d0b22311bab4c0990e8bdea76dbd5d8f63
| 1,382
|
py
|
Python
|
test/foo.py
|
SKalt/sphinx_md_output
|
bbeaab428e497e55cd246de3ee3016ed3eb73bbd
|
[
"MIT"
] | null | null | null |
test/foo.py
|
SKalt/sphinx_md_output
|
bbeaab428e497e55cd246de3ee3016ed3eb73bbd
|
[
"MIT"
] | null | null | null |
test/foo.py
|
SKalt/sphinx_md_output
|
bbeaab428e497e55cd246de3ee3016ed3eb73bbd
|
[
"MIT"
] | null | null | null |
"""
Example docstring 1
* A thing.
* Another thing.
or
1. Item 1.
2. Item 2.
3. Item 3.
or
- Some.
- Thing.
- Different.
+------------+------------+-----------+
| Header 1 | Header 2 | Header 3 |
+============+============+===========+
| body row 1 | column 2 | column 3 |
+------------+------------+-----------+
| body row 2 | Cells may span columns.|
+------------+------------+-----------+
| body row 3 | Cells may | - Cells |
+------------+ span rows. | - contain |
| body row 4 | | - blocks. |
+------------+------------+-----------+
SIMPLE TABLE:
===== ===== ======
Inputs Output
------------ ------
A B A or B
===== ===== ======
False False False
True False True
False True True
True True True
===== ===== ======
`Docs for this project <http://packages.python.org/an_example_pypi_project/>`_
This is a statement.
.. warning::
Never, ever, use this code!
.. versionadded:: 0.0.1
It's okay to use this code.
"""
#import antigravity
def foo(a, b):
    """Does a thing.
    :param a: 1
    :param b: the word 'three'
    :returns: 1
    :rtype: int
    """
    # Fixture for the docstring-rendering test: the docstring (not the logic)
    # is the content under test, so the body is a trivial constant return.
    return 1
class bar(object):
    """ Doesn't do anything
    """
    # Fixture class: exists so the docs-rendering test has both a class
    # docstring and a constructor docstring to render.
    def __init__(self, baz):
        """Init example.
        :param baz: Whatever, man.
        :returns: None
        :rtype: None
        """
        pass
a = 1  # module-level attribute included for the docs-rendering fixture
| 15.885057
| 78
| 0.437771
| 155
| 1,382
| 3.851613
| 0.522581
| 0.046901
| 0.060302
| 0.060302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022795
| 0.269899
| 1,382
| 86
| 79
| 16.069767
| 0.56888
| 0.850217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
49de4e5ded713b818555695bd2c5b7e644ca3360
| 91
|
py
|
Python
|
hello.py
|
gitChrisMoore/py-scaffold-1
|
31d214943c3d75bcd2f1c797d49a73ed83461c94
|
[
"MIT"
] | null | null | null |
hello.py
|
gitChrisMoore/py-scaffold-1
|
31d214943c3d75bcd2f1c797d49a73ed83461c94
|
[
"MIT"
] | null | null | null |
hello.py
|
gitChrisMoore/py-scaffold-1
|
31d214943c3d75bcd2f1c797d49a73ed83461c94
|
[
"MIT"
] | null | null | null |
def add(x, y):
    """Return the sum (via ``+``) of *x* and *y*."""
    total = x + y
    return total
# Demonstrate add() and report the outcome.
result = add(1, 2)
# Fix: the original template left a stray ", " in the message
# ("This is the sum: , 3"); print the value directly after the colon.
print(f"This is the sum: {result}")
| 10.111111
| 37
| 0.56044
| 18
| 91
| 2.833333
| 0.777778
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.252747
| 91
| 8
| 38
| 11.375
| 0.720588
| 0
| 0
| 0
| 0
| 0
| 0.296703
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
b7001c282dbe682e9146382ebfe8d532f9958c45
| 94
|
py
|
Python
|
test.py
|
PhilMcDaniel/discord-bot
|
326354c1d4c9488bf6aadf3000519fbe5eeb81a5
|
[
"MIT"
] | 2
|
2020-12-25T21:42:33.000Z
|
2020-12-27T01:09:00.000Z
|
test.py
|
PhilMcDaniel/discord-bot
|
326354c1d4c9488bf6aadf3000519fbe5eeb81a5
|
[
"MIT"
] | null | null | null |
test.py
|
PhilMcDaniel/discord-bot
|
326354c1d4c9488bf6aadf3000519fbe5eeb81a5
|
[
"MIT"
] | null | null | null |
# Minimal pyautogui screenshot smoke test (requires a display/GUI session).
import pyautogui

# Capture the full screen into an in-memory image object.
im1 = pyautogui.screenshot()
# Capture again, additionally writing the image to 'my_screenshot.png'.
im2 = pyautogui.screenshot('my_screenshot.png')
| 23.5
| 47
| 0.797872
| 11
| 94
| 6.727273
| 0.636364
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.085106
| 94
| 4
| 47
| 23.5
| 0.837209
| 0
| 0
| 0
| 0
| 0
| 0.178947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3f7a49932579d257bbaf6ab5301908160c22f1b6
| 129
|
py
|
Python
|
server/api/views/grips.py
|
yizhang7210/Syllable
|
0536763a21db9532fc73cd32d03a7732d73f4ab8
|
[
"MIT"
] | null | null | null |
server/api/views/grips.py
|
yizhang7210/Syllable
|
0536763a21db9532fc73cd32d03a7732d73f4ab8
|
[
"MIT"
] | 13
|
2018-09-29T21:34:25.000Z
|
2018-12-15T18:54:52.000Z
|
server/api/views/grips.py
|
yizhang7210/Syllable
|
0536763a21db9532fc73cd32d03a7732d73f4ab8
|
[
"MIT"
] | null | null | null |
# pylint: disable=unused-import
from grips.views.grips import GripDetailView, GripListView, \
GripSearchView, GripActionView
| 32.25
| 61
| 0.806202
| 13
| 129
| 8
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 129
| 3
| 62
| 43
| 0.912281
| 0.224806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3f8dadf5bec2d8aec5a2758a6aef720bda4519a2
| 813
|
py
|
Python
|
src/hw1/models.py
|
Phimos/PKU-Graph-Machine-Learning-2021-Fall
|
be3b8843426127f3c48a7d9e0db4a265afce31ad
|
[
"Apache-2.0"
] | null | null | null |
src/hw1/models.py
|
Phimos/PKU-Graph-Machine-Learning-2021-Fall
|
be3b8843426127f3c48a7d9e0db4a265afce31ad
|
[
"Apache-2.0"
] | null | null | null |
src/hw1/models.py
|
Phimos/PKU-Graph-Machine-Learning-2021-Fall
|
be3b8843426127f3c48a7d9e0db4a265afce31ad
|
[
"Apache-2.0"
] | 1
|
2022-01-06T04:25:02.000Z
|
2022-01-06T04:25:02.000Z
|
from typing import Optional
from torch import Tensor
from torch_geometric.nn import MessagePassing
class ProbabilisticRelationalClassifier(MessagePassing):
    """This class implements a probabilistic relational classifier.

    Built on torch_geometric's MessagePassing with mean aggregation, so each
    propagate() call averages neighbour states along the graph edges.
    """

    def __init__(self, aggr: Optional[str] = "mean", flow: str = "source_to_target", node_dim: int = -2):
        # Forward the aggregation/flow/node-dim configuration straight to
        # the MessagePassing base class.
        super().__init__(aggr=aggr, flow=flow, node_dim=node_dim)

    def message(self, x_j: Tensor) -> Tensor:
        # Delegates to MessagePassing's default message (neighbour features
        # pass through unchanged).
        return super().message(x_j)

    def forward(self, x: Tensor, edge_index) -> Tensor:
        # One propagation step over an N x N graph, where N = x.size(0);
        # x holds the per-node label distributions being propagated.
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
def accuracy(pred: Tensor, target: Tensor) -> float:
    """Return the fraction of rows in *pred* whose argmax matches *target*.

    :param pred: (N, C) score/probability tensor, one row per sample.
    :param target: (N,) tensor of true class indices.
    :return: accuracy in [0, 1] as a Python float.
    """
    predicted_labels = pred.argmax(dim=1)
    correct = (predicted_labels == target).float()
    return correct.mean().item()
| 31.269231
| 105
| 0.683887
| 108
| 813
| 4.981481
| 0.481481
| 0.039033
| 0.022305
| 0.026022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006015
| 0.182042
| 813
| 25
| 106
| 32.52
| 0.803008
| 0.127921
| 0
| 0
| 0
| 0
| 0.02886
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0.153846
| 0.230769
| 0.153846
| 0.846154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
3fb7ef40b1f754ceb6d91ed1f5f8b23fdec48ea3
| 2,968
|
py
|
Python
|
t/test_umash_fprint.py
|
backtrace-labs/umash
|
97466abbb12922839c6c101b73da2d61653b0f28
|
[
"MIT"
] | 108
|
2020-08-24T00:34:20.000Z
|
2022-03-13T08:43:22.000Z
|
t/test_umash_fprint.py
|
backtrace-labs/umash
|
97466abbb12922839c6c101b73da2d61653b0f28
|
[
"MIT"
] | 26
|
2020-08-25T06:08:05.000Z
|
2022-02-26T16:37:04.000Z
|
t/test_umash_fprint.py
|
backtrace-labs/umash
|
97466abbb12922839c6c101b73da2d61653b0f28
|
[
"MIT"
] | 7
|
2020-08-25T05:52:12.000Z
|
2022-03-05T02:31:38.000Z
|
"""
Test suite for the public fingerprinting function.
"""
from hypothesis import given, settings
import hypothesis.strategies as st
from umash import C, FFI
from umash_reference import umash, UmashKey
# Strategy for arbitrary unsigned 64-bit integers.
U64S = st.integers(min_value=0, max_value=2 ** 64 - 1)
# Modulus for the polynomial-hash multipliers: the Mersenne prime 2^61 - 1.
FIELD = 2 ** 61 - 1
def repeats(min_size):
    """Strategy: one random byte repeated between min_size and 1024 times."""
    counts = st.integers(min_value=min_size, max_value=1024)
    single_byte = st.binary(min_size=1, max_size=1)
    return st.builds(lambda count, binary: binary * count, counts, single_byte)
@given(
    seed=U64S,
    multipliers=st.lists(
        st.integers(min_value=0, max_value=FIELD - 1), min_size=2, max_size=2
    ),
    key=st.lists(
        U64S,
        min_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
        max_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
    ),
    data=st.binary() | repeats(1),
)
def test_public_umash_fprint(seed, multipliers, key, data):
    """Compare umash_fprint with two calls to the reference."""
    # Reference fingerprint: one umash per polynomial multiplier; the second
    # call uses the "secondary" tweak, matching fprint's two-lane output.
    expected = [
        umash(UmashKey(poly=multipliers[0], oh=key), seed, data, secondary=False),
        umash(UmashKey(poly=multipliers[1], oh=key), seed, data, secondary=True),
    ]
    # Copy the Python bytes into a C buffer for the FFI call.
    n_bytes = len(data)
    block = FFI.new("char[]", n_bytes)
    FFI.memmove(block, data, n_bytes)
    # Fill struct umash_params: poly[i] holds (multiplier^2 mod FIELD,
    # multiplier) — the squared value is presumably a precomputed power for
    # the polynomial hash (confirm against umash.c); oh[] gets the key words.
    params = FFI.new("struct umash_params[1]")
    for i, multiplier in enumerate(multipliers):
        params[0].poly[i][0] = (multiplier ** 2) % FIELD
        params[0].poly[i][1] = multiplier
    for i, param in enumerate(key):
        params[0].oh[i] = param
    actual = C.umash_fprint(params, seed, block, n_bytes)
    # The C fingerprint exposes both 64-bit lanes in actual.hash[0..1].
    assert [actual.hash[0], actual.hash[1]] == expected
@settings(deadline=None)
@given(
    seed=U64S,
    multipliers=st.lists(
        st.integers(min_value=0, max_value=FIELD - 1), min_size=2, max_size=2
    ),
    key=st.lists(
        U64S,
        min_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
        max_size=C.UMASH_OH_PARAM_COUNT + C.UMASH_OH_TWISTING_COUNT,
    ),
    byte=st.binary(min_size=1, max_size=1),
)
def test_public_umash_fprint_repeated(seed, multipliers, key, byte):
    """Compare umash_fprint with two calls to the reference, for n
    repetitions of the input byte."""
    # Build the umash_params struct once; it is reused for every length below.
    params = FFI.new("struct umash_params[1]")
    for i, multiplier in enumerate(multipliers):
        params[0].poly[i][0] = (multiplier ** 2) % FIELD
        params[0].poly[i][1] = multiplier
    for i, param in enumerate(key):
        params[0].oh[i] = param
    # Exercise every input length 0..519 to sweep the short/medium/long
    # input code paths of the C implementation.
    for i in range(520):
        data = byte * i
        expected = [
            umash(UmashKey(poly=multipliers[0], oh=key), seed, data, secondary=False),
            umash(UmashKey(poly=multipliers[1], oh=key), seed, data, secondary=True),
        ]
        n_bytes = len(data)
        block = FFI.new("char[]", n_bytes)
        FFI.memmove(block, data, n_bytes)
        actual = C.umash_fprint(params, seed, block, n_bytes)
        assert [actual.hash[0], actual.hash[1]] == expected
| 32.26087
| 86
| 0.644205
| 432
| 2,968
| 4.268519
| 0.199074
| 0.032538
| 0.034707
| 0.039046
| 0.760304
| 0.760304
| 0.733731
| 0.719089
| 0.693059
| 0.645336
| 0
| 0.026384
| 0.221024
| 2,968
| 91
| 87
| 32.615385
| 0.771194
| 0.074461
| 0
| 0.676056
| 0
| 0
| 0.020588
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 1
| 0.042254
| false
| 0
| 0.056338
| 0
| 0.112676
| 0.056338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3fbb8db508ae542e19d90135b8bbebb119a30f39
| 374
|
py
|
Python
|
samples/array1d.py
|
daoshengmu/tensorflow-samples-
|
ac36657d62b142682937609e5cb1ca7893aabf0a
|
[
"MIT"
] | null | null | null |
samples/array1d.py
|
daoshengmu/tensorflow-samples-
|
ac36657d62b142682937609e5cb1ca7893aabf0a
|
[
"MIT"
] | null | null | null |
samples/array1d.py
|
daoshengmu/tensorflow-samples-
|
ac36657d62b142682937609e5cb1ca7893aabf0a
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf

# Build a rank-1 NumPy array and mirror it as a TensorFlow tensor.
tensor_1d = np.array([1.3, 1, 4.0, 23.99])
tf_tensor = tf.convert_to_tensor(tensor_1d, dtype=tf.float64)

# Inspect the NumPy array. Parenthesised single-argument print() is valid on
# both Python 2 and Python 3 (the original bare `print x` was Python-2-only).
print(tensor_1d)
print(tensor_1d[2])
print(tensor_1d.ndim)
print(tensor_1d.shape)
print(tensor_1d.dtype)

# Evaluate the tensor. NOTE(review): tf.Session is TensorFlow 1.x API; under
# TF2 use eager execution or tf.compat.v1.Session — confirm target TF version.
with tf.Session() as sess:
    print(sess.run(tf_tensor))
    print(sess.run(tf_tensor[0]))
    print(sess.run(tf_tensor[2]))
| 22
| 61
| 0.748663
| 71
| 374
| 3.760563
| 0.380282
| 0.209738
| 0.243446
| 0.157303
| 0.224719
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065421
| 0.141711
| 374
| 16
| 62
| 23.375
| 0.766355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.153846
| null | null | 0.615385
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
3fd1fd74c454a8b929308347654bd43534a82fc8
| 148
|
py
|
Python
|
api/source/testing/cases/home_case.py
|
1pkg/ReRe
|
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
|
[
"MIT"
] | 1
|
2019-12-17T10:31:48.000Z
|
2019-12-17T10:31:48.000Z
|
api/source/testing/cases/home_case.py
|
c-pkg/ReRe
|
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
|
[
"MIT"
] | null | null | null |
api/source/testing/cases/home_case.py
|
c-pkg/ReRe
|
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
|
[
"MIT"
] | 1
|
2019-04-29T08:19:36.000Z
|
2019-04-29T08:19:36.000Z
|
from .base_case import BaseCase
from actions import Home
class HomeCase(BaseCase):
    """Test case for the Home action."""

    def test_home_result(self):
        # Placeholder: the home-page assertion has not been written yet.
        return NotImplemented
| 16.444444
| 31
| 0.756757
| 19
| 148
| 5.736842
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195946
| 148
| 8
| 32
| 18.5
| 0.915966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
3fe210aa3b197861f72d24f6b343469480aa6da8
| 68
|
py
|
Python
|
eva01/__init__.py
|
FlinnkDark/eva-01-bot
|
a98190b5d6fecf0a73abf646ca6d147e40d273ab
|
[
"MIT"
] | null | null | null |
eva01/__init__.py
|
FlinnkDark/eva-01-bot
|
a98190b5d6fecf0a73abf646ca6d147e40d273ab
|
[
"MIT"
] | null | null | null |
eva01/__init__.py
|
FlinnkDark/eva-01-bot
|
a98190b5d6fecf0a73abf646ca6d147e40d273ab
|
[
"MIT"
] | null | null | null |
from dotenv import load_dotenv

# Load environment variables from a local .env file at package import time.
load_dotenv()

# Package version string.
__version__ = "0.1.0"
| 13.6
| 30
| 0.764706
| 11
| 68
| 4.181818
| 0.636364
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 0.132353
| 68
| 4
| 31
| 17
| 0.728814
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3fe64936859950a220bae84890c6a32af6dc90b5
| 10,639
|
py
|
Python
|
gs_quant/analytics/processors/utility_processors.py
|
mlize/gs-quant
|
13aba5c362f4f9f8a78ca9288c5a3e026160ce55
|
[
"Apache-2.0"
] | 2
|
2021-06-22T12:14:38.000Z
|
2021-06-23T15:51:08.000Z
|
gs_quant/analytics/processors/utility_processors.py
|
mlize/gs-quant
|
13aba5c362f4f9f8a78ca9288c5a3e026160ce55
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/analytics/processors/utility_processors.py
|
mlize/gs-quant
|
13aba5c362f4f9f8a78ca9288c5a3e026160ce55
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from typing import Optional
import pandas as pd
from pandas import Series
from gs_quant.analytics.core.processor import BaseProcessor, DataCoordinateOrProcessor, DateOrDatetimeOrRDate
from gs_quant.analytics.core.processor_result import ProcessorResult
class LastProcessor(BaseProcessor):
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None):
        """ LastProcessor returns the last value of the series
        :param a: DataCoordinate or BaseProcessor for the first coordinate
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        """
        super().__init__()
        # Child coordinate feeding this processor.
        self.children['a'] = a
        # Date/time bounds for the underlying data query.
        self.start = start
        self.end = end

    def process(self) -> None:
        """ Calculate the result and store it as the processor value """
        a_data = self.children_data.get('a')
        if not isinstance(a_data, ProcessorResult):
            return
        if a_data.success and isinstance(a_data.data, Series):
            # Keep only the final observation, preserving its index label.
            self.value = ProcessorResult(True, pd.Series(a_data.data[-1:]))

    def get_plot_expression(self):
        pass
class AppendProcessor(BaseProcessor):
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 b: DataCoordinateOrProcessor,
                 *,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None):
        """ AppendProcessor appends both a and b data series into one series
        :param a: DataCoordinate or BaseProcessor for the first series
        :param b: DataCoordinate or BaseProcessor for the second series
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        """
        super().__init__()
        # coordinates
        self.children['a'] = a
        self.children['b'] = b
        # datetime
        self.start = start
        self.end = end

    def process(self) -> None:
        """Concatenate child series a and b and store the result."""
        a_data = self.children_data.get('a')
        b_data = self.children_data.get('b')
        if isinstance(a_data, ProcessorResult) and isinstance(b_data, ProcessorResult):
            if a_data.success and b_data.success:
                # Series.append was deprecated and removed in pandas 2.0;
                # pd.concat produces the same concatenated series.
                result = pd.concat([a_data.data, b_data.data])
                self.value = ProcessorResult(True, result)
            else:
                self.value = ProcessorResult(False, "Processor does not have A and B data yet")
        else:
            self.value = ProcessorResult(False, "Processor does not have A and B data yet")

    def get_plot_expression(self):
        pass
class AdditionProcessor(BaseProcessor):
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 *,
                 b: Optional[DataCoordinateOrProcessor] = None,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None,
                 addend: Optional[float] = None):
        """ AdditionProcessor adds two series or an addend to a series
        :param a: DataCoordinate or BaseProcessor for the first series
        :param b: DataCoordinate or BaseProcessor for the second series to add to the first
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        :param addend: number to add to all values in the series
        """
        super().__init__()
        # coordinates
        self.children['a'] = a
        self.children['b'] = b
        # datetime
        self.start = start
        self.end = end
        self.addend = addend

    def process(self):
        """Add the scalar addend (if given) or series b to series a."""
        a_data = self.children_data.get('a')
        if isinstance(a_data, ProcessorResult):
            if not a_data.success:
                # Propagate the upstream failure unchanged.
                self.value = a_data
                return
            # Explicit None check: a truthiness test would silently skip
            # a legitimate addend of 0.
            if self.addend is not None:
                value = a_data.data.add(self.addend)
                self.value = ProcessorResult(True, value)
                return
            b_data = self.children_data.get('b')
            if isinstance(b_data, ProcessorResult):
                if b_data.success:
                    value = a_data.data.add(b_data.data)
                    self.value = ProcessorResult(True, value)
                else:
                    # Propagate b's failure as-is (the original wrapped it in
                    # ProcessorResult(True, ...), marking a failure as success).
                    self.value = b_data

    def get_plot_expression(self):
        pass
class SubtractionProcessor(BaseProcessor):
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 b: Optional[DataCoordinateOrProcessor] = None,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None,
                 subtrahend: Optional[float] = None):
        """ SubtractionProcessor subtract two series or a subtrahend to a series
        :param a: DataCoordinate or BaseProcessor for the first series
        :param b: DataCoordinate or BaseProcessor for the second series to subtract to the first
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        :param subtrahend: number to subtract from all values in the series
        """
        super().__init__()
        # coordinates
        self.children['a'] = a
        self.children['b'] = b
        # datetime
        self.start = start
        self.end = end
        self.subtrahend = subtrahend

    def process(self):
        """Subtract the scalar subtrahend (if given) or series b from series a."""
        a_data = self.children_data.get('a')
        if isinstance(a_data, ProcessorResult):
            if not a_data.success:
                # Propagate the upstream failure unchanged.
                self.value = a_data
                return
            # Explicit None check: a truthiness test would silently skip
            # a legitimate subtrahend of 0.
            if self.subtrahend is not None:
                value = a_data.data.sub(self.subtrahend)
                self.value = ProcessorResult(True, value)
                return
            b_data = self.children_data.get('b')
            if isinstance(b_data, ProcessorResult):
                if b_data.success:
                    value = a_data.data.sub(b_data.data)
                    self.value = ProcessorResult(True, value)
                else:
                    # Propagate b's failure unchanged.
                    self.value = b_data

    def get_plot_expression(self):
        pass
class MultiplicationProcessor(BaseProcessor):
    """ Multiply scalar or series together """
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 b: Optional[DataCoordinateOrProcessor] = None,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None,
                 factor: Optional[float] = None):
        """ MultiplicationProcessor multiply two series or a factor to a series
        :param a: DataCoordinate or BaseProcessor for the first series
        :param b: DataCoordinate or BaseProcessor for the second series to multiply to the first
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        :param factor: number to multiply all values in the series
        """
        super().__init__()
        # coordinates
        self.children['a'] = a
        self.children['b'] = b
        # datetime
        self.start = start
        self.end = end
        self.factor = factor

    def process(self):
        """Multiply series a by the scalar factor (if given) or by series b."""
        a_data = self.children_data.get('a')
        if isinstance(a_data, ProcessorResult):
            if not a_data.success:
                # Propagate the upstream failure unchanged.
                self.value = a_data
                return
            # Explicit None check: the previous truthiness test ignored a
            # legitimate factor of 0, silently falling through to series b.
            if self.factor is not None:
                value = a_data.data.mul(self.factor)
                self.value = ProcessorResult(True, value)
                return
            b_data = self.children_data.get('b')
            if isinstance(b_data, ProcessorResult):
                if b_data.success:
                    value = a_data.data.mul(b_data.data)
                    self.value = ProcessorResult(True, value)
                else:
                    # Propagate b's failure unchanged.
                    self.value = b_data

    def get_plot_expression(self):
        pass
class DivisionProcessor(BaseProcessor):
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 b: Optional[DataCoordinateOrProcessor] = None,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None,
                 dividend: Optional[float] = None):
        """ DivisionProcessor divides two series or divides a dividend to a series
        :param a: DataCoordinate or BaseProcessor for the first series
        :param b: DataCoordinate or BaseProcessor for the second series to divide the first by
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        :param dividend: number to divide all values in the series by
        """
        super().__init__()
        # coordinates
        self.children['a'] = a
        self.children['b'] = b
        # datetime
        self.start = start
        self.end = end
        # NOTE(review): despite its name, this value acts as the divisor
        # (a / dividend); the name is kept for interface compatibility.
        self.dividend = dividend

    def process(self):
        """Divide series a by the scalar (if given) or by series b."""
        a_data = self.children_data.get('a')
        if isinstance(a_data, ProcessorResult):
            if not a_data.success:
                # Propagate the upstream failure unchanged.
                self.value = a_data
                return
            # Explicit None check: a truthiness test would silently ignore 0
            # (pandas div by 0 yields inf/NaN, which is at least explicit).
            if self.dividend is not None:
                value = a_data.data.div(self.dividend)
                self.value = ProcessorResult(True, value)
                return
            b_data = self.children_data.get('b')
            if isinstance(b_data, ProcessorResult):
                if b_data.success:
                    value = a_data.data.div(b_data.data)
                    self.value = ProcessorResult(True, value)
                else:
                    # Propagate b's failure unchanged.
                    self.value = b_data

    def get_plot_expression(self):
        pass
| 36.813149
| 109
| 0.602312
| 1,199
| 10,639
| 5.235196
| 0.130108
| 0.026286
| 0.020711
| 0.026764
| 0.75227
| 0.734587
| 0.720089
| 0.683448
| 0.677553
| 0.63183
| 0
| 0.001252
| 0.324373
| 10,639
| 288
| 110
| 36.940972
| 0.872009
| 0.280665
| 0
| 0.777143
| 0
| 0
| 0.013942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102857
| false
| 0.034286
| 0.028571
| 0
| 0.211429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3feee17143e3897f855eecfb6659a58b60f41d65
| 50
|
py
|
Python
|
clean_tichu/play.py
|
lukaspestalozzi/Master_Semester_Project
|
4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e
|
[
"MIT"
] | null | null | null |
clean_tichu/play.py
|
lukaspestalozzi/Master_Semester_Project
|
4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e
|
[
"MIT"
] | null | null | null |
clean_tichu/play.py
|
lukaspestalozzi/Master_Semester_Project
|
4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e
|
[
"MIT"
] | null | null | null |
"""
Starts a game against the computer
"""
# TODO
| 10
| 34
| 0.66
| 7
| 50
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 50
| 5
| 35
| 10
| 0.825
| 0.8
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.2
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3ff44f9587bf13811b4db4fa081bc602613575db
| 342
|
py
|
Python
|
raster/exceptions.py
|
bpneumann/django-raster
|
74daf9d396f2332a2cd83723b7330e6b10d73b1c
|
[
"BSD-3-Clause"
] | null | null | null |
raster/exceptions.py
|
bpneumann/django-raster
|
74daf9d396f2332a2cd83723b7330e6b10d73b1c
|
[
"BSD-3-Clause"
] | null | null | null |
raster/exceptions.py
|
bpneumann/django-raster
|
74daf9d396f2332a2cd83723b7330e6b10d73b1c
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.core.exceptions import SuspiciousOperation
class RasterException(SuspiciousOperation):
    """Something raster related went wrong.

    Subclasses SuspiciousOperation so Django's request handling treats it
    as a bad request rather than a server error.
    """
class RasterAlgebraException(SuspiciousOperation):
    """Raster algebra evaluation failed (e.g. a malformed formula)."""
class RasterAggregationException(Exception):
    # NOTE(review): unlike its siblings this derives from plain Exception,
    # not SuspiciousOperation — presumably intentional (aggregation errors
    # are internal, not request-level); confirm before unifying.
    pass
| 21.375
| 54
| 0.80117
| 30
| 342
| 8.966667
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125731
| 342
| 15
| 55
| 22.8
| 0.899666
| 0.204678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.333333
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
b200bb556aa3dee453459f1ce18d630933e951dc
| 160
|
py
|
Python
|
entity_emailer/__init__.py
|
wesleykendall/django-entity-emailer
|
60d078a12cccc4912d0a2aab8c2d9710d9241f22
|
[
"MIT"
] | null | null | null |
entity_emailer/__init__.py
|
wesleykendall/django-entity-emailer
|
60d078a12cccc4912d0a2aab8c2d9710d9241f22
|
[
"MIT"
] | null | null | null |
entity_emailer/__init__.py
|
wesleykendall/django-entity-emailer
|
60d078a12cccc4912d0a2aab8c2d9710d9241f22
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from .version import __version__
from .utils import get_medium, get_admin_source

# NOTE(review): default_app_config has been deprecated since Django 3.2
# (apps are auto-discovered); harmless, but removable once only modern
# Django versions are supported.
default_app_config = 'entity_emailer.apps.EntityEmailerConfig'
| 26.666667
| 62
| 0.8375
| 21
| 160
| 5.904762
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006944
| 0.1
| 160
| 5
| 63
| 32
| 0.854167
| 0.075
| 0
| 0
| 0
| 0
| 0.267123
| 0.267123
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
b20e6aa55d8883897b82b808929b5643375993e3
| 146
|
py
|
Python
|
swag_auth/box/urls.py
|
LikaloLLC/django-swag-auth
|
06fd027beca240ff50567a3be4bedee2a7e40a97
|
[
"BSD-3-Clause"
] | null | null | null |
swag_auth/box/urls.py
|
LikaloLLC/django-swag-auth
|
06fd027beca240ff50567a3be4bedee2a7e40a97
|
[
"BSD-3-Clause"
] | 6
|
2021-05-10T13:11:24.000Z
|
2021-09-08T13:35:46.000Z
|
swag_auth/box/urls.py
|
LikaloLLC/django-swag-auth
|
06fd027beca240ff50567a3be4bedee2a7e40a97
|
[
"BSD-3-Clause"
] | 2
|
2021-04-29T20:08:21.000Z
|
2021-11-17T19:21:42.000Z
|
from swag_auth.oauth2.urls import default_urlpatterns

from .connectors import BoxConnector

# URL patterns for the Box connector, built by the shared OAuth2 helper.
conn_urlpatterns = default_urlpatterns(BoxConnector)
| 24.333333
| 53
| 0.869863
| 17
| 146
| 7.235294
| 0.647059
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.089041
| 146
| 5
| 54
| 29.2
| 0.917293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b74d97b93ffe88ada08a24d3a029d2b026a655c9
| 271
|
py
|
Python
|
dianping/entrypoint.py
|
GeoLibra/spiders
|
4c1611f7356c8aa7be4f280af27efe0b83cf0a99
|
[
"MIT"
] | null | null | null |
dianping/entrypoint.py
|
GeoLibra/spiders
|
4c1611f7356c8aa7be4f280af27efe0b83cf0a99
|
[
"MIT"
] | null | null | null |
dianping/entrypoint.py
|
GeoLibra/spiders
|
4c1611f7356c8aa7be4f280af27efe0b83cf0a99
|
[
"MIT"
] | null | null | null |
from scrapy.cmdline import execute

# The third list element is the spider's name.
# execute(['scrapy','crawl','dpSpider'])
# Resumable-crawl mode: automatically creates a "crawls" folder holding
# checkpoint files so an interrupted crawl can continue.
# execute('scrapy crawl dpSpider -s JOBDIR=crawls/dpSpider'.split())
# (The bare string below is a note: pass "-L WARNING" to suppress log noise.)
'''
-L WARNING 去掉提示
'''
# Non-resumable mode: start a fresh crawl of dpSpider.
execute('scrapy crawl dpSpider'.split())
| 24.636364
| 68
| 0.734317
| 31
| 271
| 6.419355
| 0.612903
| 0.19598
| 0.271357
| 0.39196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103321
| 271
| 10
| 69
| 27.1
| 0.81893
| 0.583026
| 0
| 0
| 0
| 0
| 0.256098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b788d0b8757cfc8b50900518b8f3bc1570378617
| 372
|
py
|
Python
|
tests/cj2020/r1a/square_dance_test.py
|
marccarre/google-code-jam
|
3dbae59dff2055e3c660edea808d421a2210488c
|
[
"Apache-2.0"
] | null | null | null |
tests/cj2020/r1a/square_dance_test.py
|
marccarre/google-code-jam
|
3dbae59dff2055e3c660edea808d421a2210488c
|
[
"Apache-2.0"
] | null | null | null |
tests/cj2020/r1a/square_dance_test.py
|
marccarre/google-code-jam
|
3dbae59dff2055e3c660edea808d421a2210488c
|
[
"Apache-2.0"
] | null | null | null |
from pytest import main
from cj2020.r1a.square_dance import interest_level
def test_interest_level():
    """Check interest_level against the published sample cases."""
    cases = [
        ([[15]], 15),
        ([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 16),
        ([[3, 1, 2]], 14),
        ([[1, 2, 3]], 14),
    ]
    for grid, expected in cases:
        assert interest_level(grid) == expected
# Allow executing this test module directly (delegates to pytest's main).
if __name__ == '__main__':
    main()
| 20.666667
| 50
| 0.575269
| 52
| 372
| 3.807692
| 0.403846
| 0.393939
| 0.383838
| 0.20202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 0.260753
| 372
| 17
| 51
| 21.882353
| 0.610909
| 0
| 0
| 0.153846
| 0
| 0
| 0.021505
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.076923
| true
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b79074284172cfd1e5f94e0a8ff3a8dc82b3c359
| 53
|
py
|
Python
|
tutorials/eboutique/microservices/product/src/commands/__init__.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 247
|
2022-01-24T14:55:30.000Z
|
2022-03-25T12:06:17.000Z
|
tutorials/eboutique/microservices/product/src/commands/__init__.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 168
|
2022-01-24T14:54:31.000Z
|
2022-03-31T09:31:09.000Z
|
tutorials/eboutique/microservices/product/src/commands/__init__.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 21
|
2022-02-06T17:25:58.000Z
|
2022-03-27T04:50:29.000Z
|
from .services import (
ProductCommandService,
)
| 13.25
| 26
| 0.735849
| 4
| 53
| 9.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188679
| 53
| 3
| 27
| 17.666667
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b7a4d0e9ae414d11f65f91781f492766caf1fefd
| 1,156
|
py
|
Python
|
flask_zs/mixins.py
|
codeif/flask-zs
|
33ea4dbf97edced895e9a6eac7cbfeb6a659f6cb
|
[
"MIT"
] | 5
|
2019-12-19T09:30:20.000Z
|
2022-01-07T17:53:52.000Z
|
flask_zs/mixins.py
|
codeif/flask-zs
|
33ea4dbf97edced895e9a6eac7cbfeb6a659f6cb
|
[
"MIT"
] | null | null | null |
flask_zs/mixins.py
|
codeif/flask-zs
|
33ea4dbf97edced895e9a6eac7cbfeb6a659f6cb
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from werkzeug.security import check_password_hash, generate_password_hash
class TimestampMixin:
    """Mixin adding created_at / updated_at columns to a declarative model."""

    @declared_attr
    def created_at(cls):
        # Set once when the row is inserted (naive local time via datetime.now).
        return Column(DateTime, default=datetime.now)

    @declared_attr
    def updated_at(cls):
        # Also refreshed on every UPDATE through the onupdate hook.
        return Column(DateTime, default=datetime.now, onupdate=datetime.now)
class LoginMixin:
    """Mixin adding hashed-password storage and verification to a model."""

    @declared_attr
    def _password(cls):
        # Column named "password" in the DB; holds the werkzeug hash, never plaintext.
        return Column("password", String(191), comment="login password")

    @declared_attr
    def login_allowed(cls):
        # Login disabled by default at the database level.
        return Column(Boolean, server_default="0")

    @hybrid_property
    def password(self):
        # Exposes the stored hash; a stricter design could raise
        # AttributeError to make the property write-only.
        return self._password

    @password.setter
    def password(self, value):
        # Hash on assignment so the plaintext value is never persisted.
        self._password = generate_password_hash(value)

    def check_password(self, password):
        # An account with no password set can never authenticate.
        if not self._password:
            return False
        return check_password_hash(self._password, password)
| 28.9
| 76
| 0.724913
| 137
| 1,156
| 5.934307
| 0.357664
| 0.073801
| 0.073801
| 0.04182
| 0.105781
| 0.105781
| 0.105781
| 0.105781
| 0
| 0
| 0
| 0.004324
| 0.199827
| 1,156
| 39
| 77
| 29.641026
| 0.874595
| 0.051903
| 0
| 0.137931
| 0
| 0
| 0.021024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.241379
| false
| 0.37931
| 0.172414
| 0.172414
| 0.724138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
b7c7277838b42dde0f52d3fb79df2a0cf8051f5f
| 148
|
py
|
Python
|
gala/integrate/pyintegrators/__init__.py
|
akeemlh/gala
|
0fdaf9159bccc59af2a3525f2926e04501754f48
|
[
"MIT"
] | 86
|
2016-05-19T21:58:43.000Z
|
2022-03-22T14:56:37.000Z
|
gala/integrate/pyintegrators/__init__.py
|
akeemlh/gala
|
0fdaf9159bccc59af2a3525f2926e04501754f48
|
[
"MIT"
] | 170
|
2016-06-27T14:10:26.000Z
|
2022-03-10T22:52:39.000Z
|
gala/integrate/pyintegrators/__init__.py
|
akeemlh/gala
|
0fdaf9159bccc59af2a3525f2926e04501754f48
|
[
"MIT"
] | 66
|
2016-09-13T07:31:29.000Z
|
2022-03-08T15:08:45.000Z
|
from .dopri853 import DOPRI853Integrator
from .rk5 import RK5Integrator
from .leapfrog import LeapfrogIntegrator
from .ruth4 import Ruth4Integrator
| 29.6
| 40
| 0.864865
| 16
| 148
| 8
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 0.108108
| 148
| 4
| 41
| 37
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b7ce9c0c17443def7a3240f385a11fa01b341d81
| 4,134
|
py
|
Python
|
tests/test_class_compara.py
|
thobiast/fundosbr
|
997a0fe6439aa89b2d7884f31bed8730f6d1c525
|
[
"MIT"
] | 7
|
2020-12-23T17:01:14.000Z
|
2021-05-31T12:20:10.000Z
|
tests/test_class_compara.py
|
thobiast/fundosbr
|
997a0fe6439aa89b2d7884f31bed8730f6d1c525
|
[
"MIT"
] | 4
|
2020-12-18T17:18:08.000Z
|
2021-05-24T17:36:37.000Z
|
tests/test_class_compara.py
|
thobiast/fundosbr
|
997a0fe6439aa89b2d7884f31bed8730f6d1c525
|
[
"MIT"
] | 2
|
2021-05-31T12:20:11.000Z
|
2022-02-07T14:38:59.000Z
|
# -*- coding: utf-8 -*-
"""Test Compara class."""
import pytest
from unittest.mock import patch, Mock
from io import StringIO
import pandas as pd
from fundosbr import fundosbr
@pytest.fixture
def df_informe():
    """Fixture: daily fund-report (informe) data for two funds, Feb-Apr 2020.

    Returns a DataFrame indexed by (CNPJ_FUNDO, DT_COMPTC); VL_QUOTA is the
    quota value the profitability tests compute returns from.
    """
    data_csv = StringIO(
        """CNPJ_FUNDO;DT_COMPTC;VL_TOTAL;VL_QUOTA;VL_PATRIM_LIQ;CAPTC_DIA;RESG_DIA;NR_COTST
11.000.000/0000-00;2020-02-01;1234.51;10.00000;1111111113.61;1.00;0.00;10
11.000.000/0000-00;2020-02-03;1234.52;12.00000;1111111113.62;2.00;0.00;11
11.000.000/0000-00;2020-02-15;1234.53;14.00000;1111111113.63;0.00;0.00;12
11.000.000/0000-00;2020-02-22;1234.54;12.00000;1111111113.64;4.00;1.00;13
11.000.000/0000-00;2020-02-23;1234.55;16.00000;1111111113.65;1.00;2.00;14
11.000.000/0000-00;2020-03-02;1234.51;10.00000;1111111113.61;1.00;0.00;15
11.000.000/0000-00;2020-03-10;1234.51;12.00000;1111111113.61;1.00;1.00;16
11.000.000/0000-00;2020-03-12;1234.51;16.00000;1111111113.61;1.00;0.00;17
11.000.000/0000-00;2020-03-29;1234.51;18.00000;1111111113.61;1.00;0.00;18
11.000.000/0000-00;2020-04-01;1234.51;16.00000;1111111113.61;0.00;0.00;17
11.000.000/0000-00;2020-04-12;1234.51;18.00000;1111111113.61;0.00;4.00;19
11.000.000/0000-00;2020-04-18;1234.51;22.00000;1111111113.61;1.00;0.00;21
11.000.000/0000-00;2020-04-30;1234.51;28.00000;1111111113.61;1.00;0.00;25
22.000.000/0000-00;2020-02-01;1234.51;20.00000;1111111113.61;1.00;0.00;10
22.000.000/0000-00;2020-02-03;1234.52;22.00000;1111111113.62;2.00;0.00;11
22.000.000/0000-00;2020-02-15;1234.53;24.00000;1111111113.63;0.00;0.00;12
22.000.000/0000-00;2020-02-22;1234.54;22.00000;1111111113.64;4.00;1.00;13
22.000.000/0000-00;2020-02-23;1234.55;26.00000;1111111113.65;1.00;2.00;14
22.000.000/0000-00;2020-03-02;1234.51;20.00000;1111111113.61;1.00;0.00;15
22.000.000/0000-00;2020-03-10;1234.51;22.00000;1111111113.61;1.00;1.00;16
22.000.000/0000-00;2020-03-12;1234.51;26.00000;1111111113.61;1.00;0.00;17
22.000.000/0000-00;2020-03-29;1234.51;28.00000;1111111113.61;1.00;0.00;18
22.000.000/0000-00;2020-04-01;1234.51;26.00000;1111111113.61;0.00;0.00;17
22.000.000/0000-00;2020-04-12;1234.51;28.00000;1111111113.61;0.00;4.00;19
22.000.000/0000-00;2020-04-18;1234.51;22.00000;1111111113.61;1.00;0.00;21
22.000.000/0000-00;2020-04-30;1234.51;12.00000;1111111113.61;1.00;0.00;25"""
    )
    # Parse with the CVM file conventions: ';' separator, Latin-1 encoding,
    # and a (fund CNPJ, report date) MultiIndex with parsed dates.
    df = pd.read_csv(
        data_csv,
        sep=";",
        encoding="ISO-8859-1",
        index_col=["CNPJ_FUNDO", "DT_COMPTC"],
        parse_dates=True,
    )
    return df
def test_calc_rentabilidade_periodo(df_informe):
    """Whole-period profitability per fund: 180% and -40% for the fixture data."""
    expected_result = pd.DataFrame(
        {"Rentabilidade": [180.0, -40.0]},
        index=["11.000.000/0000-00", "22.000.000/0000-00"],
    )
    expected_result.index.name = "CNPJ_FUNDO"
    # Silence logging and stub the collaborators Compara needs.
    fundosbr.log = Mock()
    cadastral = Mock()
    cadastral.fundo_social_nome = Mock(return_value="TESTE")
    informe = Mock()
    compara = fundosbr.Compara(cadastral, informe)
    # Inject the fixture DataFrame as the informe's pd_df attribute.
    with patch.object(compara.informe, "pd_df", df_informe):
        x = compara.calc_rentabilidade_periodo()
    pd.testing.assert_frame_equal(x, expected_result)
def test_rentabilidade_mensal(df_informe):
    """Monthly profitability table renders to the expected string."""
    # NOTE(review): the whitespace/column alignment inside this literal must
    # match DataFrame.to_string output exactly — do not reformat it.
    expected_result = """CNPJ_FUNDO 11.000.000/0000-00 22.000.000/0000-00
Data
2020-03-31 12.50 7.69
2020-04-30 55.56 -57.14"""
    # Silence module logging and stub the collaborators Compara needs.
    fundosbr.log = Mock()
    cadastral = Mock()
    cadastral.fundo_social_nome = Mock(return_value="TESTE")
    informe = Mock()
    compara = fundosbr.Compara(cadastral, informe)
    # Inject the fixture DataFrame as the informe's parsed data.
    with patch.object(compara.informe, "pd_df", df_informe):
        x = compara.calc_rentabilidade_mensal()
    assert x.to_string(float_format="{:.2f}".format) == expected_result
def test_denom_social_cnpjs(df_informe):
    """adiciona_denom_social appends a 'Denominacao social' column."""
    fund_name = "meu fundo"
    # Silence module logging and stub the collaborators Compara needs.
    fundosbr.log = Mock()
    cadastral = Mock()
    cadastral.fundo_social_nome = Mock(return_value=fund_name)
    comparison = fundosbr.Compara(cadastral, Mock())

    expected = df_informe.copy()
    expected["Denominacao social"] = fund_name

    result = comparison.adiciona_denom_social(df_informe)
    pd.testing.assert_frame_equal(result, expected)
| 41.34
| 91
| 0.697146
| 779
| 4,134
| 3.612323
| 0.172015
| 0.063966
| 0.10661
| 0.127932
| 0.729211
| 0.729211
| 0.724236
| 0.691187
| 0.506397
| 0.320185
| 0
| 0.423396
| 0.125302
| 4,134
| 99
| 92
| 41.757576
| 0.354812
| 0.01016
| 0
| 0.327273
| 0
| 0
| 0.166185
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 1
| 0.072727
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4d00e5c7663ee47941ba969ae728dbdc78935e41
| 37,068
|
py
|
Python
|
tests/app/views/test_login.py
|
pebblecode/cirrus-buyer-frontend
|
506c45eab09fa9538c0eb05643e24feecdcca56f
|
[
"MIT"
] | null | null | null |
tests/app/views/test_login.py
|
pebblecode/cirrus-buyer-frontend
|
506c45eab09fa9538c0eb05643e24feecdcca56f
|
[
"MIT"
] | null | null | null |
tests/app/views/test_login.py
|
pebblecode/cirrus-buyer-frontend
|
506c45eab09fa9538c0eb05643e24feecdcca56f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from cirrus.email import send_email
from dmapiclient import HTTPError
from dmapiclient.audit import AuditTypes
from dmutils.email import generate_token
from ...helpers import BaseApplicationTest
from lxml import html
import mock
# User-facing messages asserted against by the tests in this module.
# Validation errors for the login / reset-password forms:
EMAIL_EMPTY_ERROR = "You must provide an email address"
EMAIL_INVALID_ERROR = "You must provide a valid email address"
EMAIL_SENT_MESSAGE = "If the email address you've entered belongs to a Digital Marketplace account, we'll send a link to reset the password."  # noqa
PASSWORD_EMPTY_ERROR = "You must provide your password"
PASSWORD_INVALID_ERROR = "Passwords must be between 10 and 50 characters"
PASSWORD_MISMATCH_ERROR = "The passwords you entered do not match"
NEW_PASSWORD_EMPTY_ERROR = "You must enter a new password"
NEW_PASSWORD_CONFIRM_EMPTY_ERROR = "Please confirm your new password"
# Server-side failure and expired/invalid token messages:
USER_CREATION_EMAIL_ERROR = "Failed to send user creation email."
PASSWORD_RESET_EMAIL_ERROR = "Failed to send password reset."
TOKEN_CREATED_BEFORE_PASSWORD_LAST_CHANGED_ERROR = "This password reset link is invalid."
USER_LINK_EXPIRED_ERROR = "The link you used to create an account may have expired."
class TestLogin(BaseApplicationTest):
    """Tests for the /login and /logout views.

    Covers role-based redirects (supplier/buyer/admin), handling of the
    ``next`` query parameter, whitespace trimming on form fields, session
    cookie flags, and validation/authentication error responses.
    """

    def setup(self):
        # Patch the login view's data_api_client so authenticate_user
        # returns a supplier user (supplier id 1234) by default.
        super(TestLogin, self).setup()
        data_api_client_config = {'authenticate_user.return_value': self.user(
            123, "email@email.com", 1234, 'name', 'name'
        )}
        self._data_api_client = mock.patch(
            'app.main.views.login.data_api_client', **data_api_client_config
        )
        self.data_api_client_mock = self._data_api_client.start()

    def teardown(self):
        # Undo the patch started in setup().
        self._data_api_client.stop()

    def test_should_show_login_page(self):
        res = self.client.get("/login")
        assert res.status_code == 200
        assert "Log in to Inoket" in res.get_data(as_text=True)

    def test_should_redirect_to_supplier_dashboard_on_supplier_login(self):
        res = self.client.post("/login", data={
            'email_address': 'valid@email.com',
            'password': '1234567890'
        })
        assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers'
        # The session cookie must carry the Secure flag.
        assert 'Secure;' in res.headers['Set-Cookie']

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_redirect_to_homepage_on_buyer_login(self, data_api_client):
        with self.app.app_context():
            # No supplier id / role args -> presumably a buyer user.
            data_api_client.authenticate_user.return_value = self.user(
                123, "email@email.com", None, None, 'Name')
            res = self.client.post("/login", data={
                'email_address': 'valid@email.com',
                'password': '1234567890'
            })
            assert res.status_code == 302
            assert res.location == 'http://localhost/'
            assert 'Secure;' in res.headers['Set-Cookie']

    def test_should_redirect_logged_in_supplier_to_supplier_dashboard(self):
        self.login_as_supplier()
        res = self.client.get("/login")
        assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers'

    def test_should_redirect_logged_in_buyer_to_homepage(self):
        self.login_as_buyer()
        res = self.client.get("/login")
        assert res.status_code == 302
        assert res.location == 'http://localhost/'

    def test_should_redirect_logged_in_admin_to_admin_dashboard(self):
        self.login_as_admin()
        res = self.client.get("/login")
        assert res.status_code == 302
        assert res.location == 'http://localhost/admin'

    def test_should_redirect_logged_in_admin_to_next_url_if_admin_app(self):
        # `next` is honoured only when it points into the admin app.
        self.login_as_admin()
        res = self.client.get("/login?next=/admin/foo-bar")
        assert res.status_code == 302
        assert res.location == 'http://localhost/admin/foo-bar'

    def test_should_redirect_logged_in_supplier_to_next_url_if_supplier_app(self):
        self.login_as_supplier()
        res = self.client.get("/login?next=/suppliers/foo-bar")
        assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers/foo-bar'

    def test_should_redirect_to_supplier_dashboard_if_next_url_not_supplier_app(self):
        # A `next` outside the supplier app falls back to the dashboard.
        self.login_as_supplier()
        res = self.client.get("/login?next=/foo-bar")
        assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers'

    def test_should_strip_whitespace_surrounding_login_email_address_field(self):
        self.client.post("/login", data={
            'email_address': ' valid@email.com ',
            'password': '1234567890'
        })
        self.data_api_client_mock.authenticate_user.assert_called_with(
            'valid@email.com', '1234567890')

    def test_should_not_strip_whitespace_surrounding_login_password_field(self):
        # Passwords are significant byte-for-byte, spaces included.
        self.client.post("/login", data={
            'email_address': 'valid@email.com',
            'password': '  1234567890 '
        })
        self.data_api_client_mock.authenticate_user.assert_called_with(
            'valid@email.com', '  1234567890 ')

    def test_ok_next_url_redirects_supplier_on_login(self):
        res = self.client.post("/login?next=/suppliers/bar-foo",
                               data={
                                   'email_address': 'valid@email.com',
                                   'password': '1234567890'
                               })
        assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers/bar-foo'

    @mock.patch('app.main.views.login.data_api_client')
    def test_ok_next_url_redirects_buyer_on_login(self, data_api_client):
        with self.app.app_context():
            data_api_client.authenticate_user.return_value = self.user(
                123, "email@email.com", None, None, 'Name')
            res = self.client.post("/login?next=/bar-foo",
                                   data={
                                       'email_address': 'valid@email.com',
                                       'password': '1234567890'
                                   })
            assert res.status_code == 302
            assert res.location == 'http://localhost/bar-foo'

    def test_bad_next_url_takes_supplier_user_to_dashboard(self):
        # An absolute/external `next` must not be followed (open redirect).
        res = self.client.post("/login?next=http://badness.com",
                               data={
                                   'email_address': 'valid@email.com',
                                   'password': '1234567890'
                               })
        assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers'

    @mock.patch('app.main.views.login.data_api_client')
    def test_bad_next_url_takes_buyer_user_to_homepage(self, data_api_client):
        with self.app.app_context():
            data_api_client.authenticate_user.return_value = self.user(
                123, "email@email.com", None, None, 'Name')
            res = self.client.post("/login?next=http://badness.com",
                                   data={
                                       'email_address': 'valid@email.com',
                                       'password': '1234567890'
                                   })
        assert res.status_code == 302
        assert res.location == 'http://localhost/'

    def test_should_have_cookie_on_redirect(self):
        with self.app.app_context():
            self.app.config['SESSION_COOKIE_DOMAIN'] = '127.0.0.1'
            self.app.config['SESSION_COOKIE_SECURE'] = True
            res = self.client.post("/login", data={
                'email_address': 'valid@email.com',
                'password': '1234567890'
            })
            cookie_value = self.get_cookie_by_name(res, 'dm_session')
            assert cookie_value['dm_session'] is not None
            # Secure + HttpOnly flags and Path=/ must all be set.
            assert cookie_value['Secure; HttpOnly; Path'] == '/'
            assert cookie_value["Domain"] == "127.0.0.1"

    def test_should_redirect_to_login_on_logout(self):
        res = self.client.get('/logout')
        assert res.status_code == 302
        assert res.location == 'http://localhost/login'

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_a_403_for_invalid_login(self, data_api_client):
        # authenticate_user returning None means bad credentials.
        data_api_client.authenticate_user.return_value = None
        res = self.client.post("/login", data={
            'email_address': 'valid@email.com',
            'password': '1234567890'
        })
        assert self.strip_all_whitespace("Make sure you've entered the right email address and password") \
            in self.strip_all_whitespace(res.get_data(as_text=True))
        assert res.status_code == 403

    def test_should_be_validation_error_if_no_email_or_password(self):
        res = self.client.post("/login", data={})
        content = self.strip_all_whitespace(res.get_data(as_text=True))
        assert res.status_code == 400
        assert self.strip_all_whitespace(EMAIL_EMPTY_ERROR) in content
        assert self.strip_all_whitespace(PASSWORD_EMPTY_ERROR) in content

    def test_should_be_validation_error_if_invalid_email(self):
        res = self.client.post("/login", data={
            'email_address': 'invalid',
            'password': '1234567890'
        })
        content = self.strip_all_whitespace(res.get_data(as_text=True))
        assert res.status_code == 400
        assert self.strip_all_whitespace(EMAIL_INVALID_ERROR) in content
class TestResetPassword(BaseApplicationTest):
    """Tests for the /reset-password request and token-confirmation views.

    Covers email validation, token round-tripping, password validation
    rules (length, match, whitespace), stale-token rejection, and the
    email-sending failure path.
    """

    # Token payload shared by the token-based tests; set in setup().
    _user = None

    def setup(self):
        # Patch the login view's data_api_client so get_user returns a
        # supplier user for 'email@email.com'.
        super(TestResetPassword, self).setup()
        data_api_client_config = {'get_user.return_value': self.user(
            123, "email@email.com", 1234, 'name', 'Name'
        )}
        self._user = {
            "user": 123,
            "email": 'email@email.com',
        }
        self._data_api_client = mock.patch(
            'app.main.views.login.data_api_client', **data_api_client_config
        )
        self.data_api_client_mock = self._data_api_client.start()

    def teardown(self):
        # Undo the patch started in setup().
        self._data_api_client.stop()

    def test_email_should_not_be_empty(self):
        res = self.client.post("/reset-password", data={})
        content = self.strip_all_whitespace(res.get_data(as_text=True))
        assert res.status_code == 400
        assert self.strip_all_whitespace(EMAIL_EMPTY_ERROR) in content

    def test_email_should_be_valid(self):
        res = self.client.post("/reset-password", data={
            'email_address': 'invalid'
        })
        content = self.strip_all_whitespace(res.get_data(as_text=True))
        assert res.status_code == 400
        assert self.strip_all_whitespace(EMAIL_INVALID_ERROR) in content

    @mock.patch('app.main.views.login.send_email')
    def test_redirect_to_same_page_on_success(self, send_email):
        res = self.client.post("/reset-password", data={
            'email_address': 'email@email.com'
        })
        assert res.status_code == 302
        assert res.location == 'http://localhost/reset-password'

    @mock.patch('app.main.views.login.send_email')
    def test_show_email_sent_message_on_success(self, send_email):
        res = self.client.post("/reset-password", data={
            'email_address': 'email@email.com'
        }, follow_redirects=True)
        assert res.status_code == 200
        content = self.strip_all_whitespace(res.get_data(as_text=True))
        assert self.strip_all_whitespace(EMAIL_SENT_MESSAGE) in content

    @mock.patch('app.main.views.login.send_email')
    def test_should_strip_whitespace_surrounding_reset_password_email_address_field(self, send_email):
        self.client.post("/reset-password", data={
            'email_address': ' email@email.com'
        })
        self.data_api_client_mock.get_user.assert_called_with(
            email_address='email@email.com')

    def test_email_should_be_decoded_from_token(self):
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        res = self.client.get(url)
        assert res.status_code == 200
        assert "Reset password for email@email.com" in res.get_data(as_text=True)

    def test_password_should_not_be_empty(self):
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        res = self.client.post(url, data={
            'password': '',
            'confirm_password': ''
        })
        assert res.status_code == 400
        assert NEW_PASSWORD_EMPTY_ERROR in res.get_data(as_text=True)
        assert NEW_PASSWORD_CONFIRM_EMPTY_ERROR in res.get_data(as_text=True)

    def test_password_should_be_over_ten_chars_long(self):
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        # 9 characters: one below the minimum length of 10.
        res = self.client.post(url, data={
            'password': '123456789',
            'confirm_password': '123456789'
        })
        assert res.status_code == 400
        assert PASSWORD_INVALID_ERROR in res.get_data(as_text=True)

    def test_password_should_be_under_51_chars_long(self):
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        # 51 characters: one above the maximum length of 50.
        res = self.client.post(url, data={
            'password':
                '123456789012345678901234567890123456789012345678901',
            'confirm_password':
                '123456789012345678901234567890123456789012345678901'
        })
        assert res.status_code == 400
        assert PASSWORD_INVALID_ERROR in res.get_data(as_text=True)

    def test_passwords_should_match(self):
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        res = self.client.post(url, data={
            'password': '1234567890',
            'confirm_password': '0123456789'
        })
        assert res.status_code == 400
        assert PASSWORD_MISMATCH_ERROR in res.get_data(as_text=True)

    def test_redirect_to_login_page_on_success(self):
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        res = self.client.post(url, data={
            'password': '1234567890',
            'confirm_password': '1234567890'
        })
        assert res.status_code == 302
        assert res.location == 'http://localhost/login'

    def test_should_not_strip_whitespace_surrounding_reset_password_password_field(self):
        # Leading whitespace in the new password must survive to the API call.
        with self.app.app_context():
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        self.client.post(url, data={
            'password': '  1234567890',
            'confirm_password': '  1234567890'
        })
        self.data_api_client_mock.update_user_password.assert_called_with(
            self._user.get('user'), '  1234567890', self._user.get('email'))

    @mock.patch('app.main.views.login.data_api_client')
    def test_token_created_before_last_updated_password_cannot_be_used(
            self, data_api_client
    ):
        with self.app.app_context():
            # is_token_valid=False simulates a password change newer than
            # the token, which must invalidate the reset link.
            data_api_client.get_user.return_value = self.user(
                123, "email@email.com", 1234, 'email', 'Name', is_token_valid=False
            )
            token = generate_token(
                self._user,
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
            url = '/reset-password/{}'.format(token)
            res = self.client.post(url, data={
                'password': '1234567890',
                'confirm_password': '1234567890'
            }, follow_redirects=True)
            assert res.status_code == 200
            assert TOKEN_CREATED_BEFORE_PASSWORD_LAST_CHANGED_ERROR in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.send_email')
    def test_should_call_send_email_with_correct_params(
            self, send_email
    ):
        with self.app.app_context():
            self.app.config['RESET_PASSWORD_EMAIL_SUBJECT'] = "SUBJECT"
            self.app.config['RESET_PASSWORD_EMAIL_FROM'] = "EMAIL FROM"
            self.app.config['RESET_PASSWORD_EMAIL_NAME'] = "EMAIL NAME"
            res = self.client.post(
                '/reset-password',
                data={'email_address': 'email@email.com'}
            )
            assert res.status_code == 302
            send_email.assert_called_once_with(
                "email@email.com",
                mock.ANY,  # the generated email body, not asserted here
                "SUBJECT",
                "EMAIL FROM",
                "EMAIL NAME",
                ["password-resets"]
            )

    @mock.patch('app.main.views.login.send_email')
    def test_should_be_an_error_if_send_email_fails(
            self, send_email
    ):
        with self.app.app_context():
            # NOTE(review): the nested Exception(Exception(...)) looks
            # accidental but is harmless — any raise triggers the 503 path.
            send_email.side_effect = Exception(Exception('API is down'))
            res = self.client.post(
                '/reset-password',
                data={'email_address': 'email@email.com'}
            )
            assert res.status_code == 503
            assert PASSWORD_RESET_EMAIL_ERROR in res.get_data(as_text=True)
class TestLoginFormsNotAutofillable(BaseApplicationTest):
    """Checks that login-related forms and their inputs disable autofill."""

    def _forms_and_inputs_not_autofillable(
            self, url, expected_title, expected_lede=None
    ):
        """Fetch *url*, assert its h1 title (and optional lede paragraph),
        and assert every form and every visible non-submit input inside
        the page content carries autocomplete="off".
        """
        response = self.client.get(url)
        assert response.status_code == 200

        document = html.fromstring(response.get_data(as_text=True))

        page_title = document.xpath(
            '//main[@id="content"]//h1/text()')[0].strip()
        assert expected_title == page_title

        if expected_lede:
            page_lede = document.xpath(
                '//main[@id="content"]//p[@class="lede"]/text()')[0].strip()
            assert expected_lede == page_lede

        forms = document.xpath('//main[@id="content"]//form')

        for form in forms:
            assert form.get('autocomplete') == "off"
            # FIX: the previous '//input[...]' XPath is absolute in lxml and
            # matched inputs anywhere in the document; './/input' restricts
            # the search to this form's own descendants.
            non_hidden_inputs = form.xpath('.//input[@type!="hidden"]')

            # Renamed from `input` to avoid shadowing the builtin.
            for input_field in non_hidden_inputs:
                if input_field.get('type') != 'submit':
                    assert input_field.get('autocomplete') == "off"

    def test_login_form_and_inputs_not_autofillable(self):
        self._forms_and_inputs_not_autofillable(
            "/login",
            "Log in to Inoket"
        )

    def test_request_password_reset_form_and_inputs_not_autofillable(self):
        self._forms_and_inputs_not_autofillable(
            "/reset-password",
            "Reset password"
        )

    @mock.patch('app.main.views.login.data_api_client')
    def test_reset_password_form_and_inputs_not_autofillable(
            self, data_api_client
    ):
        data_api_client.get_user.return_value = self.user(
            123, "email@email.com", 1234, 'email', 'name'
        )
        with self.app.app_context():
            token = generate_token(
                {
                    "user": 123,
                    "email": 'email@email.com',
                },
                self.app.config['SECRET_KEY'],
                self.app.config['RESET_PASSWORD_SALT'])
        url = '/reset-password/{}'.format(token)
        self._forms_and_inputs_not_autofillable(
            url,
            "Reset password",
            "Reset password for email@email.com"
        )
class TestBuyersCreation(BaseApplicationTest):
    """Tests for the /buyers/create account-request form.

    Covers form rendering, email validation, public-sector domain checks,
    email-send failure, and audit-event creation.
    """

    def test_should_get_create_buyer_form_ok(self):
        res = self.client.get('/buyers/create')
        assert res.status_code == 200
        assert 'Create a buyer account' in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.send_email')
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_be_able_to_submit_valid_email_address(self, data_api_client, send_email):
        res = self.client.post(
            '/buyers/create',
            data={'email_address': 'valid@test.gov.uk'},
            follow_redirects=True
        )
        assert res.status_code == 200
        assert 'Activate your account' in res.get_data(as_text=True)

    def test_should_raise_validation_error_for_invalid_email_address(self):
        res = self.client.post(
            '/buyers/create',
            data={'email_address': 'not-an-email-address'},
            follow_redirects=True
        )
        assert res.status_code == 400
        data = res.get_data(as_text=True)
        assert 'Create a buyer account' in data
        assert 'You must provide a valid email address' in data

    def test_should_raise_validation_error_for_empty_email_address(self):
        res = self.client.post(
            '/buyers/create',
            data={},
            follow_redirects=True
        )
        assert res.status_code == 400
        data = res.get_data(as_text=True)
        assert 'Create a buyer account' in data
        assert 'You must provide an email address' in data

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_show_error_page_for_unrecognised_email_domain(self, data_api_client):
        # Non-public-sector domains are rejected by the API check.
        data_api_client.is_email_address_with_valid_buyer_domain.return_value = False
        res = self.client.post(
            '/buyers/create',
            data={'email_address': 'kev@ymail.com'},
            follow_redirects=True
        )
        assert res.status_code == 400
        data = res.get_data(as_text=True)
        assert "You must use a public sector email address" in data
        assert "The email you used doesn't belong to a recognised public sector domain." in data

    @mock.patch('app.main.views.login.data_api_client')
    @mock.patch('app.main.views.login.send_email')
    def test_should_503_if_email_fails_to_send(self, send_email, data_api_client):
        data_api_client.is_email_address_with_valid_buyer_domain.return_value = True
        send_email.side_effect = Exception("Arrrgh")
        res = self.client.post(
            '/buyers/create',
            data={'email_address': 'valid@test.gov.uk'},
            follow_redirects=True
        )
        assert res.status_code == 503
        assert USER_CREATION_EMAIL_ERROR in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.send_email')
    @mock.patch('app.main.views.login.data_api_client')
    def test_should_create_audit_event_when_email_sent(self, data_api_client, send_email):
        res = self.client.post(
            '/buyers/create',
            data={'email_address': 'valid@test.gov.uk'},
            follow_redirects=True
        )
        assert res.status_code == 200
        # A successful invite must be recorded as an invite_user audit event.
        data_api_client.create_audit_event.assert_called_with(
            audit_type=AuditTypes.invite_user,
            data={'invitedEmail': 'valid@test.gov.uk'})
class TestCreateUser(BaseApplicationTest):
    """Tests for the token-based /create-user/<token> flow.

    Covers token validation (missing/invalid/expired/wrong payload), form
    validation (name and password lengths, whitespace handling), rejection
    of existing/locked/inactive accounts, and buyer-user creation.
    """

    def _generate_token(self, email_address='test@email.com'):
        # Build an invite token the same way the invite email does.
        return generate_token(
            {
                'email_address': email_address
            },
            self.app.config['SHARED_EMAIL_KEY'],
            self.app.config['INVITE_EMAIL_SALT']
        )

    def test_should_be_an_error_for_invalid_token(self):
        token = "1234"
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400

    def test_should_be_an_error_for_missing_token(self):
        res = self.client.get('/create-user')
        assert res.status_code == 404

    def test_should_be_an_error_for_missing_token_trailing_slash(self):
        # Flask redirects the trailing-slash variant to the canonical URL.
        res = self.client.get('/create-user/')
        assert res.status_code == 301
        assert res.location == 'http://localhost/create-user'

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_be_an_error_for_invalid_token_contents(self, data_api_client):
        # A token whose payload lacks 'email_address' must be rejected
        # before any API lookup happens.
        token = generate_token(
            {
                'this_is_not_expected': 1234
            },
            self.app.config['SHARED_EMAIL_KEY'],
            self.app.config['INVITE_EMAIL_SALT']
        )
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert data_api_client.get_user.called is False

    def test_should_be_a_bad_request_if_token_expired(self):
        res = self.client.get(
            'create-user/12345'
        )
        assert res.status_code == 400
        assert USER_LINK_EXPIRED_ERROR in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_render_create_user_page_if_user_does_not_exist(self, data_api_client):
        data_api_client.get_user.return_value = None
        token = self._generate_token()
        res = self.client.get(
            'create-user/{}'.format(token)
        )
        assert res.status_code == 200
        for message in [
            "Create a new Digital Marketplace account",
            "test@email.com",
            '<input type="submit" class="button-save" value="Create account" />',
            '<form autocomplete="off" action="/create-user/%s" method="POST" id="createUserForm">' % token
        ]:
            assert message in res.get_data(as_text=True)

    def test_should_be_an_error_if_invalid_token_on_submit(self):
        res = self.client.post(
            '/create-user/invalidtoken',
            data={
                'password': '123456789',
                'name': 'name',
                'email_address': 'valid@test.com'}
        )
        assert res.status_code == 400
        assert USER_LINK_EXPIRED_ERROR in res.get_data(as_text=True)
        # The creation form must not be re-rendered for a bad token.
        assert (
            '<input type="submit" class="button-save" value="Create contributor account" />'
            not in res.get_data(as_text=True)
        )

    def test_should_be_an_error_if_missing_name_and_password(self):
        token = self._generate_token()
        res = self.client.post(
            '/create-user/{}'.format(token),
            data={}
        )
        assert res.status_code == 400
        for message in [
            "You must enter a name",
            "You must enter a password"
        ]:
            assert message in res.get_data(as_text=True)

    def test_should_be_an_error_if_too_short_name_and_password(self):
        token = self._generate_token()
        # 9-char password is below the 10-char minimum; empty name invalid.
        res = self.client.post(
            '/create-user/{}'.format(token),
            data={
                'password': "123456789",
                'name': ""
            }
        )
        assert res.status_code == 400
        for message in [
            "You must enter a name",
            "Passwords must be between 10 and 50 characters"
        ]:
            assert message in res.get_data(as_text=True)

    def test_should_be_an_error_if_too_long_name_and_password(self):
        with self.app.app_context():
            token = self._generate_token()
            # One character over each maximum (255 for names, 50 for passwords).
            twofiftysix = "a" * 256
            fiftyone = "a" * 51

            res = self.client.post(
                '/create-user/{}'.format(token),
                data={
                    'password': fiftyone,
                    'name': twofiftysix
                }
            )
            assert res.status_code == 400
            for message in [
                "Names must be between 1 and 255 characters",
                "Passwords must be between 10 and 50 characters",
                "Create a new Digital Marketplace account",
                "test@email.com"
            ]:
                assert message in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_exists_and_is_a_buyer(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123, 'test@email.com', None, None, 'Users name')
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_admin_message_if_user_is_an_admin(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123, 'test@email.com', None, None, 'Users name', role='admin')
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_locked_message_if_user_is_locked(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123,
            'test@email.com',
            None,
            None,
            'Users name',
            locked=True
        )
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert "Your account has been locked" in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_with_inactive_message_if_user_is_not_active(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123,
            'test@email.com',
            None,
            None,
            'Users name',
            active=False
        )
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert "Your account has been deactivated" in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_is_already_registered(self, data_api_client):
        data_api_client.get_user.return_value = self.user(
            123,
            'test@email.com',
            None,
            None,
            'Users name'
        )
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token),
            follow_redirects=True
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_already_registered_as_a_supplier(self, data_api_client):
        # Logged in as one supplier while the invite belongs to a
        # different supplier user.
        self.login_as_supplier()
        data_api_client.get_user.return_value = self.user(
            999,
            'test@email.com',
            1234,
            'Supplier',
            'Different users name'
        )
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert "Your email address is already registered as an account with ‘Supplier’." in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_is_already_logged_in(self, data_api_client):
        self.login_as_supplier()
        data_api_client.get_user.return_value = self.user(
            123,
            'email@email.com',
            None,
            None,
            'Users name'
        )
        token = self._generate_token()
        res = self.client.get(
            '/create-user/{}'.format(token)
        )
        assert res.status_code == 400
        assert "Account already exists" in res.get_data(as_text=True)

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_create_user_if_user_does_not_exist(self, data_api_client):
        data_api_client.get_user.return_value = None
        token = self._generate_token()
        res = self.client.post(
            '/create-user/{}'.format(token),
            data={
                'password': 'validpassword',
                'name': 'valid name'
            }
        )
        # The email address comes from the token, the role is fixed to buyer.
        data_api_client.create_user.assert_called_once_with({
            'role': 'buyer',
            'password': 'validpassword',
            'emailAddress': 'test@email.com',
            'name': 'valid name'
        })
        assert res.status_code == 302
        assert res.location == 'http://localhost/'

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_return_an_error_if_user_exists(self, data_api_client):
        # 409 from the API means the account already exists.
        data_api_client.create_user.side_effect = HTTPError(mock.Mock(status_code=409))
        token = self._generate_token()
        res = self.client.post(
            '/create-user/{}'.format(token),
            data={
                'password': 'validpassword',
                'name': 'valid name'
            }
        )
        data_api_client.create_user.assert_called_once_with({
            'role': 'buyer',
            'password': 'validpassword',
            'emailAddress': 'test@email.com',
            'name': 'valid name'
        })
        assert res.status_code == 400

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_strip_whitespace_surrounding_create_user_name_field(self, data_api_client):
        data_api_client.get_user.return_value = None
        token = self._generate_token()
        self.client.post(
            '/create-user/{}'.format(token),
            data={
                'password': 'validpassword',
                'name': '  valid name  '
            }
        )
        data_api_client.create_user.assert_called_once_with({
            'role': mock.ANY,
            'password': 'validpassword',
            'emailAddress': mock.ANY,
            'name': 'valid name'
        })

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_not_strip_whitespace_surrounding_create_user_password_field(self, data_api_client):
        # Unlike names, passwords must be passed through verbatim.
        data_api_client.get_user.return_value = None
        token = self._generate_token()
        self.client.post(
            '/create-user/{}'.format(token),
            data={
                'password': '  validpassword  ',
                'name': 'valid name  '
            }
        )
        data_api_client.create_user.assert_called_once_with({
            'role': mock.ANY,
            'password': '  validpassword  ',
            'emailAddress': mock.ANY,
            'name': 'valid name'
        })

    @mock.patch('app.main.views.login.data_api_client')
    def test_should_be_a_503_if_api_fails(self, data_api_client):
        with self.app.app_context():
            data_api_client.create_user.side_effect = HTTPError("bad email")
            token = self._generate_token()
            res = self.client.post(
                '/create-user/{}'.format(token),
                data={
                    'password': 'validpassword',
                    'name': 'valid name'
                }
            )
            assert res.status_code == 503
class TestBuyerRoleRequired(BaseApplicationTest):
    """Tests that /buyers pages are restricted to logged-in buyer users."""

    def test_login_required_for_buyer_pages(self):
        with self.app.app_context():
            res = self.client.get('/buyers')
            assert res.status_code == 302
            assert res.location == 'http://localhost/login?next=%2Fbuyers'

    def test_supplier_cannot_access_buyer_pages(self):
        with self.app.app_context():
            self.login_as_supplier()
            res = self.client.get('/buyers')
            assert res.status_code == 302
            assert res.location == 'http://localhost/login?next=%2Fbuyers'
            # A role-mismatch flash message explains why access was denied.
            self.assert_flashes('buyer-role-required', expected_category='error')

    @mock.patch('app.buyers.views.buyers.data_api_client')
    def test_buyer_pages_ok_if_logged_in_as_buyer(self, data_api_client):
        with self.app.app_context():
            self.login_as_buyer()
            res = self.client.get('/buyers')
            page_text = res.get_data(as_text=True)
            assert res.status_code == 200
            assert 'buyer@email.com' in page_text
            assert 'Some Buyer' in page_text
| 38.332989
| 149
| 0.608287
| 4,434
| 37,068
| 4.780334
| 0.069689
| 0.031374
| 0.058266
| 0.053784
| 0.796424
| 0.754482
| 0.722117
| 0.682676
| 0.639885
| 0.611295
| 0
| 0.0254
| 0.283074
| 37,068
| 966
| 150
| 38.372671
| 0.772192
| 0.000486
| 0
| 0.603936
| 0
| 0.00246
| 0.20671
| 0.047507
| 0
| 0
| 0
| 0
| 0.174662
| 1
| 0.093481
| false
| 0.135301
| 0.00984
| 0.00123
| 0.113161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
4d0abeb76e5f282dfb906a47e777d0aa2ebd5457
| 289
|
py
|
Python
|
custom_packages/CustomNeuralNetworks/CustomNeuralNetworks/__init__.py
|
davidelomeo/mangroves_deep_learning
|
27ce24fe183b65f054c1d6b41417a64355cd0c9c
|
[
"MIT"
] | null | null | null |
custom_packages/CustomNeuralNetworks/CustomNeuralNetworks/__init__.py
|
davidelomeo/mangroves_deep_learning
|
27ce24fe183b65f054c1d6b41417a64355cd0c9c
|
[
"MIT"
] | null | null | null |
custom_packages/CustomNeuralNetworks/CustomNeuralNetworks/__init__.py
|
davidelomeo/mangroves_deep_learning
|
27ce24fe183b65f054c1d6b41417a64355cd0c9c
|
[
"MIT"
] | null | null | null |
from .unet import * # noqa
from .vgg19_unet import * # noqa
from .resnet50_unet import * # noqa
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Resolve the installed package's version from its distribution metadata.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed (e.g. running from a source checkout);
    # __version__ is deliberately left undefined in that case.
    pass
| 26.272727
| 64
| 0.775087
| 34
| 289
| 6.205882
| 0.588235
| 0.14218
| 0.199052
| 0.255924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016598
| 0.16609
| 289
| 10
| 65
| 28.9
| 0.858921
| 0.134948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.125
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
4d0e23387fbb404605ab4d3dc5a65e213953dc6e
| 198
|
py
|
Python
|
zebrok/exceptions.py
|
kaypee90/zebrok
|
d3d855c8bac98c6e9ff92541f6aff4e0fe2b57f5
|
[
"MIT"
] | 13
|
2021-02-11T10:26:02.000Z
|
2021-09-08T23:39:44.000Z
|
zebrok/exceptions.py
|
kaypee90/zebrok
|
d3d855c8bac98c6e9ff92541f6aff4e0fe2b57f5
|
[
"MIT"
] | 11
|
2021-02-03T14:41:33.000Z
|
2022-03-17T03:31:40.000Z
|
zebrok/exceptions.py
|
kaypee90/zebrok
|
d3d855c8bac98c6e9ff92541f6aff4e0fe2b57f5
|
[
"MIT"
] | 1
|
2021-02-13T02:05:43.000Z
|
2021-02-13T02:05:43.000Z
|
class ZebrokNotImplementedError(NotImplementedError):
    """Raised when a derived class fails to implement an abstract method
    of a base class."""
| 28.285714
| 58
| 0.70202
| 23
| 198
| 6.043478
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.252525
| 198
| 6
| 59
| 33
| 0.939189
| 0.525253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
4d5de301fbea67e47465fb3b23a30026abdf95cf
| 228
|
py
|
Python
|
sebastian/lilypond/write_lilypond.py
|
aisipos/sebastian
|
4e460c3aeab332b45c74fe78e65e76ec87d5cfa8
|
[
"MIT"
] | 47
|
2015-01-07T16:25:27.000Z
|
2022-03-07T07:21:27.000Z
|
sebastian/lilypond/write_lilypond.py
|
EQ4/sebastian
|
4e460c3aeab332b45c74fe78e65e76ec87d5cfa8
|
[
"MIT"
] | 16
|
2015-02-02T15:40:10.000Z
|
2016-02-01T13:03:45.000Z
|
sebastian/lilypond/write_lilypond.py
|
EQ4/sebastian
|
4e460c3aeab332b45c74fe78e65e76ec87d5cfa8
|
[
"MIT"
] | 10
|
2015-02-02T19:48:57.000Z
|
2021-03-19T17:45:17.000Z
|
def lily_format(seq):
    """Join the ``lilypond`` field of every point in *seq* with single spaces."""
    fragments = [point["lilypond"] for point in seq]
    return " ".join(fragments)
def output(seq):
    """Wrap the lily-formatted sequence in a LilyPond expression block."""
    return f"{{ {lily_format(seq)} }}"
def write(filename, seq):
    """Render *seq* with output() and save the result to *filename*."""
    rendered = output(seq)
    with open(filename, "w") as handle:
        handle.write(rendered)
| 19
| 55
| 0.614035
| 34
| 228
| 4.058824
| 0.558824
| 0.144928
| 0.188406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219298
| 228
| 11
| 56
| 20.727273
| 0.775281
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.285714
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
4d62ce3318fdae5116a00b4cd3a6f7308375d188
| 322
|
py
|
Python
|
src/langs/tests/__init__.py
|
Shroud007/cappa
|
f820423374fa7914e66e4e16ad275fd53621c43c
|
[
"MIT"
] | null | null | null |
src/langs/tests/__init__.py
|
Shroud007/cappa
|
f820423374fa7914e66e4e16ad275fd53621c43c
|
[
"MIT"
] | null | null | null |
src/langs/tests/__init__.py
|
Shroud007/cappa
|
f820423374fa7914e66e4e16ad275fd53621c43c
|
[
"MIT"
] | null | null | null |
from src.langs.providers.python.tests import ProviderTestCase as PythonTestCase
from src.langs.providers.cpp.tests import ProviderTestCase as CppTestCase
from src.langs.providers.csharp.tests import ProviderTestCase as CSharpTestCase
if __name__ == '__main__':
    # NOTE(review): these calls only *instantiate* the TestCase classes;
    # constructing a TestCase does not execute its tests. Presumably the
    # suites are meant to be run via a test runner (unittest/pytest) —
    # confirm before relying on this script to execute them.
    PythonTestCase()
    CppTestCase()
    CSharpTestCase()
| 35.777778
| 79
| 0.810559
| 36
| 322
| 7.027778
| 0.472222
| 0.083004
| 0.142292
| 0.249012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118012
| 322
| 8
| 80
| 40.25
| 0.890845
| 0
| 0
| 0
| 0
| 0
| 0.024845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.428571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4d861fed5ed0fc2b873d42bdd4bb19d2422ec702
| 53
|
py
|
Python
|
pypendency/lexer/__init__.py
|
Taschenbergerm/pypendency
|
d941f584cabd0e6acc56ec3df43be174198ae4b7
|
[
"Apache-2.0"
] | null | null | null |
pypendency/lexer/__init__.py
|
Taschenbergerm/pypendency
|
d941f584cabd0e6acc56ec3df43be174198ae4b7
|
[
"Apache-2.0"
] | 1
|
2021-06-23T15:05:40.000Z
|
2021-06-23T15:05:40.000Z
|
pypendency/lexer/__init__.py
|
Taschenbergerm/pypendency
|
d941f584cabd0e6acc56ec3df43be174198ae4b7
|
[
"Apache-2.0"
] | null | null | null |
from .lark import RelationLexer as LarkRelationLexer
| 26.5
| 52
| 0.867925
| 6
| 53
| 7.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 53
| 1
| 53
| 53
| 0.978723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4d89f5af00a32f2efd1de1b47fb48c866cdb1cdc
| 204
|
py
|
Python
|
eos/core/__init__.py
|
YSaxon/eos
|
0cebeb2fd2d1952d6bb0d040a22f909fd5ae6efd
|
[
"Beerware"
] | 168
|
2020-04-27T12:05:28.000Z
|
2022-03-29T15:50:37.000Z
|
eos/core/__init__.py
|
YSaxon/eos
|
0cebeb2fd2d1952d6bb0d040a22f909fd5ae6efd
|
[
"Beerware"
] | 10
|
2020-04-27T19:03:48.000Z
|
2021-12-02T22:24:11.000Z
|
eos/core/__init__.py
|
YSaxon/eos
|
0cebeb2fd2d1952d6bb0d040a22f909fd5ae6efd
|
[
"Beerware"
] | 20
|
2020-04-27T21:22:27.000Z
|
2022-01-13T13:27:18.000Z
|
"""
EOS core package.
"""
from .base import Base, EOSException
from .profiler import Profiler
from .symfony import Symfony
from .engine import Engine
from .cookies import RememberMe
from .eos import EOS
| 18.545455
| 36
| 0.779412
| 28
| 204
| 5.678571
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151961
| 204
| 10
| 37
| 20.4
| 0.919075
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4d8bf4c623838efcad67725f7eaf61a54cc9837f
| 1,791
|
py
|
Python
|
styleguide_example/common/types.py
|
kdkocev/Styleguide-Example
|
0514a7dd534b1eea2a0baa5e29d05a51ff8bc41c
|
[
"MIT"
] | 78
|
2020-07-07T07:11:15.000Z
|
2021-12-05T16:31:29.000Z
|
styleguide_example/common/types.py
|
chiemerieezechukwu/Styleguide-Example
|
a5e945b489ea3d9c88d9842f09189d48e7791cf5
|
[
"MIT"
] | 17
|
2020-07-08T12:03:39.000Z
|
2021-11-22T13:07:49.000Z
|
styleguide_example/common/types.py
|
chiemerieezechukwu/Styleguide-Example
|
a5e945b489ea3d9c88d9842f09189d48e7791cf5
|
[
"MIT"
] | 24
|
2020-07-11T08:59:41.000Z
|
2021-11-29T11:35:42.000Z
|
from typing import (
Generic,
Iterator,
Any,
TypeVar,
Optional,
Dict,
Tuple,
Union
)
# `Iterable` must come from `collections.abc`: the alias in the bare
# `collections` namespace was deprecated since Python 3.3 and removed
# in Python 3.10, so `from collections import Iterable` raises ImportError
# on modern interpreters.
from collections.abc import Iterable

# Type variable standing in for any concrete Django model class.
DjangoModel = TypeVar('DjangoModel')
class QuerySetType(Generic[DjangoModel], Iterable):
    """
    This type represents django.db.models.QuerySet interface.

    Defined Types:
        DjangoModel - model instance
        QuerysetType[DjangoModel] - Queryset of DjangoModel instances
        Iterator[DjangoModel] - Iterator of DjangoModel instances
    """
    # Iteration yields model instances.
    def __iter__(self) -> Iterator[DjangoModel]: ...
    # Chainable methods that return another queryset.
    def all(self) -> 'QuerySetType[DjangoModel]': ...
    def order_by(self, *args: Any) -> 'QuerySetType[DjangoModel]': ...
    def count(self) -> int: ...
    def filter(self, **kwargs: Any) -> 'QuerySetType[DjangoModel]': ...
    def exclude(self, **kwargs: Any) -> 'QuerySetType[DjangoModel]': ...
    def get(self, **kwargs: Any) -> DjangoModel: ...
    def annotate(self, **kwargs: Any) -> 'QuerySetType[DjangoModel]': ...
    def first(self) -> Optional[DjangoModel]: ...
    def update(self, **kwargs: Any) -> DjangoModel: ...
    # delete() returns (total rows deleted, per-model deletion counts).
    def delete(self, **kwargs: Any) -> Tuple[int, Dict[str, int]]: ...
    def last(self) -> Optional[DjangoModel]: ...
    def exists(self) -> bool: ...
    def values(self, *args: Any) -> 'QuerySetType[DjangoModel]': ...
    def values_list(self, *args: Any) -> 'QuerySetType[DjangoModel]': ...
    # Indexing: an int index yields one model, a slice yields a queryset.
    def __getitem__(
        self,
        index: int
    ) -> Union[DjangoModel, "QuerySetType[DjangoModel]"]: ...
    def __len__(self) -> int: ...
    # Set-style combination of two querysets (SQL OR / AND).
    def __or__(
        self,
        qs: "QuerySetType[DjangoModel]"
    ) -> 'QuerySetType[DjangoModel]': ...
    def __and__(
        self,
        qs: "QuerySetType[DjangoModel]"
    ) -> 'QuerySetType[DjangoModel]': ...
| 25.225352
| 73
| 0.610274
| 171
| 1,791
| 6.263158
| 0.345029
| 0.183007
| 0.218487
| 0.162465
| 0.360411
| 0.309991
| 0
| 0
| 0
| 0
| 0
| 0
| 0.226689
| 1,791
| 70
| 74
| 25.585714
| 0.773285
| 0.130095
| 0
| 0.170732
| 0
| 0
| 0.203801
| 0.196592
| 0
| 0
| 0
| 0
| 0
| 1
| 0.463415
| false
| 0
| 0.04878
| 0
| 0.536585
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
4d95c584757947eaa4c142a89c1fc64eb1bbe1f0
| 157
|
py
|
Python
|
poikit/model/baidu/hexagon.py
|
Civitasv/PoiKit
|
da806ed0b8b219fdd0ab945f88fb43f21c132263
|
[
"MIT"
] | null | null | null |
poikit/model/baidu/hexagon.py
|
Civitasv/PoiKit
|
da806ed0b8b219fdd0ab945f88fb43f21c132263
|
[
"MIT"
] | null | null | null |
poikit/model/baidu/hexagon.py
|
Civitasv/PoiKit
|
da806ed0b8b219fdd0ab945f88fb43f21c132263
|
[
"MIT"
] | null | null | null |
# -- coding: utf-8 --
import math
class Hexagon:
    """A hexagon described by its center and radius."""

    def __init__(self, center, radius) -> None:
        # center: presumably an (x, y)/(lng, lat) coordinate pair — TODO confirm.
        self.center = center
        # radius: presumably the circumradius — confirm units against callers.
        self.radius = radius
| 17.444444
| 47
| 0.605096
| 19
| 157
| 4.789474
| 0.684211
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.273885
| 157
| 8
| 48
| 19.625
| 0.789474
| 0.121019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
4daba73a7f35d530a19196c98a26a633a05695a1
| 123
|
py
|
Python
|
bardhub/draftsong/admin.py
|
migdotcom/music-library
|
4648ea02e4b071c4a287eba09202045963992873
|
[
"MIT"
] | null | null | null |
bardhub/draftsong/admin.py
|
migdotcom/music-library
|
4648ea02e4b071c4a287eba09202045963992873
|
[
"MIT"
] | null | null | null |
bardhub/draftsong/admin.py
|
migdotcom/music-library
|
4648ea02e4b071c4a287eba09202045963992873
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import DraftSong
# Register your models here.
# DraftSong is exposed in the Django admin with the default ModelAdmin.
admin.site.register(DraftSong)
| 24.6
| 32
| 0.821138
| 17
| 123
| 5.941176
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 5
| 33
| 24.6
| 0.926606
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4daeb39d3966aa5720f716384c199c6886faf357
| 603
|
py
|
Python
|
ps_gym/__init__.py
|
mawbray/ps-gym
|
43c8798ed49fb9e566e3d11f1ad8db7c9b4119a4
|
[
"MIT"
] | null | null | null |
ps_gym/__init__.py
|
mawbray/ps-gym
|
43c8798ed49fb9e566e3d11f1ad8db7c9b4119a4
|
[
"MIT"
] | null | null | null |
ps_gym/__init__.py
|
mawbray/ps-gym
|
43c8798ed49fb9e566e3d11f1ad8db7c9b4119a4
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register, registry, make, spec
# Production Scheduling Environments
# Each call binds a gym environment id to a scheduling environment class
# in ps_gym.envs, so they can be created with gym.make(id).
register(id='SingleStageParallel-v0',
         entry_point='ps_gym.envs.singlestageparallel:SingleStageParallelMaster'
         )
register(id='SingleStageParallel-v1',
         entry_point='ps_gym.envs.singlestageparallel:SingleStageParallelSO1'
         )
# v2/v3 point at the "_large" modules — presumably larger-scale variants
# of the single-stage parallel environment; confirm in ps_gym.envs.
register(id='SingleStageParallel-v2',
         entry_point='ps_gym.envs.singlestageparallel_large:SingleStageParallelLMaster'
         )
register(id='SingleStageParallel-v3',
         entry_point='ps_gym.envs.singlestageparallel_largestateexp:SingleStageParallelLStateExp'
         )
| 27.409091
| 90
| 0.812604
| 58
| 603
| 8.275862
| 0.465517
| 0.072917
| 0.241667
| 0.125
| 0.316667
| 0.316667
| 0
| 0
| 0
| 0
| 0
| 0.009124
| 0.091211
| 603
| 21
| 91
| 28.714286
| 0.866788
| 0.056385
| 0
| 0
| 0
| 0
| 0.619485
| 0.619485
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4db40547420ad5ab210d9876e8436335f9df4e6e
| 636
|
py
|
Python
|
fba/generator/style_net/build.py
|
hukkelas/full_body_anonymization
|
c61745b137c84ffb742ef6ab2f4721db4acf22b7
|
[
"MIT"
] | 27
|
2022-01-06T20:15:24.000Z
|
2022-03-29T11:54:49.000Z
|
fba/generator/style_net/build.py
|
hukkelas/full_body_anonymization
|
c61745b137c84ffb742ef6ab2f4721db4acf22b7
|
[
"MIT"
] | 2
|
2022-03-17T06:04:23.000Z
|
2022-03-25T08:50:57.000Z
|
fba/generator/style_net/build.py
|
hukkelas/full_body_anonymization
|
c61745b137c84ffb742ef6ab2f4721db4acf22b7
|
[
"MIT"
] | 2
|
2022-01-07T13:16:59.000Z
|
2022-01-16T02:10:50.000Z
|
from fba.utils import Registry, build_from_cfg
from torch import nn
# Registry mapping style-encoder names to their classes.
STYLE_ENCODER_REGISTRY = Registry("STYLE_ENCODER_REGISTRY")


def build_stylenet(style_cfg, **kwargs):
    """Instantiate the style network described by *style_cfg* via the registry."""
    return build_from_cfg(style_cfg, STYLE_ENCODER_REGISTRY, **kwargs)
@STYLE_ENCODER_REGISTRY.register_module
class NoneStyle(nn.Module):
    """No-op style encoder: yields one empty kwargs dict per conv layer."""

    def __init__(
        self, feature_sizes_enc, feature_sizes_dec, feature_sizes_mid, **kwargs
    ):
        super().__init__()
        # One entry per conv layer across encoder, middle and decoder stages.
        self.num_conv = len(feature_sizes_enc) + len(feature_sizes_dec) + len(feature_sizes_mid)

    def forward(self, *args, **kwargs):
        # Build distinct dicts with a comprehension. The original
        # `[{}] * self.num_conv` repeats ONE dict object, so a mutation by
        # any consumer would silently leak into every layer's kwargs.
        return iter([{} for _ in range(self.num_conv)])
| 31.8
| 96
| 0.72327
| 84
| 636
| 5.02381
| 0.392857
| 0.170616
| 0.189573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174528
| 636
| 20
| 97
| 31.8
| 0.80381
| 0
| 0
| 0
| 0
| 0
| 0.034537
| 0.034537
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.142857
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
4dbba0aa2709fce12ab63c2b97699a6c52e5eb18
| 45
|
py
|
Python
|
src/pkgcore/__init__.py
|
filmor/pkgcore
|
ddd17f893b69b423e5385bd3fee7b5bffd14ad5b
|
[
"BSD-3-Clause"
] | null | null | null |
src/pkgcore/__init__.py
|
filmor/pkgcore
|
ddd17f893b69b423e5385bd3fee7b5bffd14ad5b
|
[
"BSD-3-Clause"
] | null | null | null |
src/pkgcore/__init__.py
|
filmor/pkgcore
|
ddd17f893b69b423e5385bd3fee7b5bffd14ad5b
|
[
"BSD-3-Clause"
] | null | null | null |
# Package identity metadata used by tooling and version reporting.
__title__ = 'pkgcore'
__version__ = '0.12.7'
| 15
| 22
| 0.688889
| 6
| 45
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0.133333
| 45
| 2
| 23
| 22.5
| 0.487179
| 0
| 0
| 0
| 0
| 0
| 0.288889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4dcce982902387563951c482dc8c3a4ccd58d7f6
| 36
|
py
|
Python
|
python/testData/completion/rPowSignature.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/completion/rPowSignature.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/rPowSignature.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class Cl(object):
def __rpo<caret>
| 18
| 18
| 0.722222
| 6
| 36
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 2
| 18
| 18
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4df8f2bce0177a00ff182dd5c66676e9d0e11fdf
| 158
|
py
|
Python
|
livingTree/__init__.py
|
AdeBC/living-tree-toolk
|
e7f4312395c55279171913314b67629bd0a643d9
|
[
"MIT"
] | 4
|
2020-04-07T13:57:56.000Z
|
2021-08-04T00:25:47.000Z
|
livingTree/__init__.py
|
AdeBC/living-tree-toolkit
|
e7f4312395c55279171913314b67629bd0a643d9
|
[
"MIT"
] | null | null | null |
livingTree/__init__.py
|
AdeBC/living-tree-toolkit
|
e7f4312395c55279171913314b67629bd0a643d9
|
[
"MIT"
] | null | null | null |
from .LineageTracker import LineageTracker
from .SuperTree import SuperTree
from .TaxaRetriever import TaxaRetriever
from .TreeBuilder import TreeBuilder
| 31.6
| 43
| 0.848101
| 16
| 158
| 8.375
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 158
| 4
| 44
| 39.5
| 0.971014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1282c00196c46f5f4e49b0cdc314465a5a12b3f2
| 49
|
py
|
Python
|
python/testData/intentions/convertVariadicParamSeveralCallsWithSameDefaultValue_after.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/intentions/convertVariadicParamSeveralCallsWithSameDefaultValue_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/intentions/convertVariadicParamSeveralCallsWithSameDefaultValue_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def bar(foo=0, **kwargs):
b = foo
c = foo
| 16.333333
| 25
| 0.489796
| 9
| 49
| 2.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.326531
| 49
| 3
| 26
| 16.333333
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.