hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1de0aee1776a327595eeed787b438fc8b5f2ac7
| 91
|
py
|
Python
|
src/antidote/utils.py
|
keelerm84/antidote
|
a30d488cd6d3421e50a2414bc9a20af052d3b821
|
[
"MIT"
] | null | null | null |
src/antidote/utils.py
|
keelerm84/antidote
|
a30d488cd6d3421e50a2414bc9a20af052d3b821
|
[
"MIT"
] | null | null | null |
src/antidote/utils.py
|
keelerm84/antidote
|
a30d488cd6d3421e50a2414bc9a20af052d3b821
|
[
"MIT"
] | null | null | null |
def is_compiled() -> bool:
from ._internal.wrapper import compiled
return compiled
| 22.75
| 43
| 0.725275
| 11
| 91
| 5.818182
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197802
| 91
| 3
| 44
| 30.333333
| 0.876712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
62cbb31add21eed8390a1358754a061e20edbc17
| 188
|
py
|
Python
|
post_office/apps.py
|
jimmyye/django-post_office
|
bd12880d6b31da8aeca39276b453c9265dff96c5
|
[
"MIT"
] | 4
|
2017-03-15T14:44:15.000Z
|
2019-07-24T12:54:37.000Z
|
post_office/apps.py
|
jimmyye/django-post_office
|
bd12880d6b31da8aeca39276b453c9265dff96c5
|
[
"MIT"
] | null | null | null |
post_office/apps.py
|
jimmyye/django-post_office
|
bd12880d6b31da8aeca39276b453c9265dff96c5
|
[
"MIT"
] | 4
|
2019-05-24T16:48:08.000Z
|
2020-05-13T07:58:10.000Z
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class PostOfficeConfig(AppConfig):
name = 'post_office'
verbose_name = _("Post Office")
| 23.5
| 55
| 0.771277
| 23
| 188
| 6.086957
| 0.695652
| 0.142857
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154255
| 188
| 7
| 56
| 26.857143
| 0.880503
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
62d4ea588e88787cc77298c1c6427b24562fd049
| 63
|
py
|
Python
|
create_python_app/create_files.py
|
averak/create-python-app
|
77551bd8ab7fa0e5c23079a62f61ab00953d3d23
|
[
"MIT"
] | 1
|
2021-03-26T07:49:29.000Z
|
2021-03-26T07:49:29.000Z
|
create_python_app/create_files.py
|
averak/create-python-app
|
77551bd8ab7fa0e5c23079a62f61ab00953d3d23
|
[
"MIT"
] | null | null | null |
create_python_app/create_files.py
|
averak/create-python-app
|
77551bd8ab7fa0e5c23079a62f61ab00953d3d23
|
[
"MIT"
] | null | null | null |
import os
os.path.join(os.path.dirname(__file__), 'template')
| 15.75
| 51
| 0.746032
| 10
| 63
| 4.3
| 0.7
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079365
| 63
| 3
| 52
| 21
| 0.741379
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c50e1d0ad66bd72526fee6838c0a08b67309b319
| 25
|
py
|
Python
|
gcpds/databases/BCI_Competition_IV/__init__.py
|
UN-GCPDS/GCPDS.databases
|
706549e1e893ff00e451a054f6235aaf18aebdf3
|
[
"BSD-2-Clause"
] | null | null | null |
gcpds/databases/BCI_Competition_IV/__init__.py
|
UN-GCPDS/GCPDS.databases
|
706549e1e893ff00e451a054f6235aaf18aebdf3
|
[
"BSD-2-Clause"
] | null | null | null |
gcpds/databases/BCI_Competition_IV/__init__.py
|
UN-GCPDS/GCPDS.databases
|
706549e1e893ff00e451a054f6235aaf18aebdf3
|
[
"BSD-2-Clause"
] | 1
|
2021-07-29T16:36:17.000Z
|
2021-07-29T16:36:17.000Z
|
from . import Dataset_2a
| 12.5
| 24
| 0.8
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c52b48a3b686571a1062d7e8289059a6d936ddfd
| 149
|
py
|
Python
|
vnpy/api/qdp/__init__.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 5
|
2019-01-17T12:14:14.000Z
|
2021-05-30T10:24:42.000Z
|
vnpy/api/qdp/__init__.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 1
|
2018-06-12T10:08:24.000Z
|
2018-06-12T10:08:24.000Z
|
vnpy/api/qdp/__init__.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 5
|
2019-03-26T03:17:45.000Z
|
2019-11-05T08:08:18.000Z
|
# encoding: UTF-8
from __future__ import absolute_import
from .vnqdpmd import MdApi
from .vnqdptd import TdApi
from .qdp_data_type import defineDict
| 24.833333
| 38
| 0.832215
| 22
| 149
| 5.318182
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0.127517
| 149
| 6
| 39
| 24.833333
| 0.892308
| 0.100671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3dc93c2bc1dbcc57f6db6dc22feed78bdde349ba
| 961
|
py
|
Python
|
firehole/algorithms/__init__.py
|
xSumner/firehole
|
50007fdf3d71cfe3a2c2aa76d2043bca1b52a05b
|
[
"Apache-2.0"
] | 4
|
2020-06-23T08:27:07.000Z
|
2021-05-18T06:59:03.000Z
|
firehole/algorithms/__init__.py
|
xSumner/firehole
|
50007fdf3d71cfe3a2c2aa76d2043bca1b52a05b
|
[
"Apache-2.0"
] | null | null | null |
firehole/algorithms/__init__.py
|
xSumner/firehole
|
50007fdf3d71cfe3a2c2aa76d2043bca1b52a05b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# There are import oderwise
from firehole.algorithms.flashtext import *
from firehole.algorithms.weight import *
from firehole.algorithms.ahp import *
from firehole.algorithms.similarity import *
from firehole.algorithms.convert import *
import firehole.algorithms.flashtext
import firehole.algorithms.weight
import firehole.algorithms.ahp
import firehole.algorithms.similarity
import firehole.algorithms.convert
# Need to test with Numpy, when available
# weight
from firehole.algorithms.weight import (Entropy, COV)
from firehole.algorithms.ahp import parse
# Keyword extraction and replace
from firehole.algorithms.flashtext import KeywordProcessor
# calculate the text similarity
from firehole.algorithms.similarity import (BM25Plus, BM25L, BM25Okapi)
from firehole.algorithms.similarity import (Simhash, SimhashIndex)
# convert between different format
from firehole.algorithms.convert import (convertID)
| 31
| 71
| 0.823101
| 116
| 961
| 6.818966
| 0.396552
| 0.364096
| 0.305942
| 0.141593
| 0.490518
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00813
| 0.104058
| 961
| 30
| 72
| 32.033333
| 0.910569
| 0.216441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9aa3922eec8bf104db6d3ca8ca2d5215c7bf6d05
| 353
|
py
|
Python
|
api_atelier/clients/tests/conftest.py
|
Kubiniet/Api-Atelier-DRF
|
1b1697c375ca6901e08ef225b93a01d98d18fd48
|
[
"MIT"
] | null | null | null |
api_atelier/clients/tests/conftest.py
|
Kubiniet/Api-Atelier-DRF
|
1b1697c375ca6901e08ef225b93a01d98d18fd48
|
[
"MIT"
] | 7
|
2022-02-23T02:26:50.000Z
|
2022-03-28T02:33:04.000Z
|
api_atelier/clients/tests/conftest.py
|
Kubiniet/Api-Atelier-DRF
|
1b1697c375ca6901e08ef225b93a01d98d18fd48
|
[
"MIT"
] | null | null | null |
import pytest
from api_atelier.users.tests.factories import AdminFactory
from .factories import ClientFactory, ServiceFactory
@pytest.fixture
def admin_creation():
return AdminFactory.create()
@pytest.fixture
def client_creation():
return ClientFactory.create()
@pytest.fixture
def service_creation():
return ServiceFactory.create()
| 16.809524
| 58
| 0.787535
| 39
| 353
| 7.025641
| 0.487179
| 0.142336
| 0.175182
| 0.160584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130312
| 353
| 20
| 59
| 17.65
| 0.892508
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9ab8acb6d26d7a391e8af61bb5a97d3c35855a60
| 101
|
py
|
Python
|
qcelemental/molutil/__init__.py
|
dgasmith/QCElemental
|
cd1eeeffd8655368d5fa884047f1e8eddc4c1988
|
[
"BSD-3-Clause"
] | null | null | null |
qcelemental/molutil/__init__.py
|
dgasmith/QCElemental
|
cd1eeeffd8655368d5fa884047f1e8eddc4c1988
|
[
"BSD-3-Clause"
] | null | null | null |
qcelemental/molutil/__init__.py
|
dgasmith/QCElemental
|
cd1eeeffd8655368d5fa884047f1e8eddc4c1988
|
[
"BSD-3-Clause"
] | null | null | null |
from .align import B787, compute_scramble, kabsch_align
from .connectivity import guess_connectivity
| 33.666667
| 55
| 0.861386
| 13
| 101
| 6.461538
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032967
| 0.09901
| 101
| 2
| 56
| 50.5
| 0.89011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b16f2ac22673942be6dfb728d833d4623f315cd3
| 249
|
py
|
Python
|
collective/collective_group/mpi_collective_group.py
|
fustinose/ray-scalable-ml-design
|
9bc01ab76ec7f6d9615fdc5d88ff9e67254e43fb
|
[
"Apache-2.0"
] | null | null | null |
collective/collective_group/mpi_collective_group.py
|
fustinose/ray-scalable-ml-design
|
9bc01ab76ec7f6d9615fdc5d88ff9e67254e43fb
|
[
"Apache-2.0"
] | null | null | null |
collective/collective_group/mpi_collective_group.py
|
fustinose/ray-scalable-ml-design
|
9bc01ab76ec7f6d9615fdc5d88ff9e67254e43fb
|
[
"Apache-2.0"
] | null | null | null |
from collective.collective_group.base_collective_group import BaseGroup
# TODO(Dacheng): implement this
class MPIGroup(BaseGroup):
def __init__(self, world_size, rank, group_name):
BaseGroup.__init__(self, world_size, rank, group_name)
| 35.571429
| 71
| 0.787149
| 32
| 249
| 5.65625
| 0.59375
| 0.165746
| 0.143646
| 0.187845
| 0.331492
| 0.331492
| 0.331492
| 0
| 0
| 0
| 0
| 0
| 0.128514
| 249
| 6
| 72
| 41.5
| 0.834101
| 0.116466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b1735b13a8e0c20478387a416441f0e6d2d19d77
| 937
|
py
|
Python
|
src/django_scim/constants.py
|
horida/django-scim2
|
76f20e6fdeb3b8cb71ef41bc913ac3c878d90ece
|
[
"MIT"
] | null | null | null |
src/django_scim/constants.py
|
horida/django-scim2
|
76f20e6fdeb3b8cb71ef41bc913ac3c878d90ece
|
[
"MIT"
] | null | null | null |
src/django_scim/constants.py
|
horida/django-scim2
|
76f20e6fdeb3b8cb71ef41bc913ac3c878d90ece
|
[
"MIT"
] | null | null | null |
import re
ENCODING = 'utf-8'
SCIM_CONTENT_TYPE = 'application/scim+json'
VALID_PATCH_OPS = ('add', 'remove', 'replace')
class SchemaURI(object):
ERROR = 'urn:ietf:params:scim:api:messages:2.0:Error'
LIST_RESPONSE = 'urn:ietf:params:scim:api:messages:2.0:ListResponse'
SERACH_REQUEST = 'urn:ietf:params:scim:api:messages:2.0:SearchRequest'
NOT_SERACH_REQUEST = 'urn:ietf:params:scim:api:messages:2.0:NotSearchRequest'
PATCH_OP = 'urn:ietf:params:scim:api:messages:2.0:PatchOp'
USER = 'urn:ietf:params:scim:schemas:core:2.0:User'
ENTERPRISE_URN = 'urn:ietf:params:scim:schemas:extension:enterprise'
ENTERPRISE_USER = 'urn:ietf:params:scim:schemas:extension:enterprise:2.0:User'
GROUP = 'urn:ietf:params:scim:schemas:core:2.0:Group'
RESOURCE_TYPE = 'urn:ietf:params:scim:schemas:core:2.0:ResourceType'
SERVICE_PROVIDER_CONFIG = 'urn:ietf:params:scim:schemas:core:2.0:ServiceProviderConfig'
| 40.73913
| 91
| 0.742796
| 139
| 937
| 4.906475
| 0.352518
| 0.112903
| 0.209677
| 0.274194
| 0.571848
| 0.571848
| 0.560117
| 0.434018
| 0.1261
| 0.1261
| 0
| 0.02503
| 0.104589
| 937
| 22
| 92
| 42.590909
| 0.787843
| 0
| 0
| 0
| 0
| 0
| 0.626068
| 0.603632
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.8125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
492cfbf6fdfcbb72d4396059784dd47cc0479c49
| 165
|
py
|
Python
|
user_details/track.py
|
vrn25/College-Predictor
|
2a0cdc830bb1563482dc20846998f344a5f2b336
|
[
"MIT"
] | 3
|
2020-01-20T17:00:44.000Z
|
2022-01-11T15:19:46.000Z
|
user_details/track.py
|
vrn25/COLLEGE-PREDICTOR
|
2a0cdc830bb1563482dc20846998f344a5f2b336
|
[
"MIT"
] | null | null | null |
user_details/track.py
|
vrn25/COLLEGE-PREDICTOR
|
2a0cdc830bb1563482dc20846998f344a5f2b336
|
[
"MIT"
] | 1
|
2022-03-03T09:46:05.000Z
|
2022-03-03T09:46:05.000Z
|
list_of_users=[] # it stores the list of usernames in form of strings
def fun(l1):
global list_of_users
list_of_users=l1
def fun2():
return list_of_users
| 20.625
| 70
| 0.739394
| 30
| 165
| 3.8
| 0.533333
| 0.263158
| 0.385965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022556
| 0.193939
| 165
| 8
| 71
| 20.625
| 0.834586
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
4968add40274301654743d9ee8176284bdaddd58
| 115
|
py
|
Python
|
evkit/rl/algo/__init__.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 120
|
2019-04-22T04:45:28.000Z
|
2022-03-23T01:53:17.000Z
|
evkit/rl/algo/__init__.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 14
|
2019-06-12T08:21:21.000Z
|
2021-08-25T15:36:58.000Z
|
evkit/rl/algo/__init__.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 19
|
2019-06-19T07:00:36.000Z
|
2022-03-24T07:18:30.000Z
|
from .a2c_acktr import A2C_ACKTR
from .ppo import PPO
from .ppo_replay import PPOReplay
from .deepq import QLearner
| 28.75
| 33
| 0.834783
| 19
| 115
| 4.894737
| 0.473684
| 0.172043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.130435
| 115
| 4
| 34
| 28.75
| 0.91
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4975ac56e99f61667ef1b135a7ebbf875a7240c8
| 177
|
py
|
Python
|
tests/testcase.py
|
daviskregers/notion-calendar-to-google-calendar
|
996bab7b8e633636fdfd326fe8c8ce4e369ffb8c
|
[
"MIT"
] | null | null | null |
tests/testcase.py
|
daviskregers/notion-calendar-to-google-calendar
|
996bab7b8e633636fdfd326fe8c8ce4e369ffb8c
|
[
"MIT"
] | 4
|
2022-02-20T15:09:37.000Z
|
2022-02-20T15:28:03.000Z
|
tests/testcase.py
|
daviskregers/notion-calendar-to-google-calendar
|
996bab7b8e633636fdfd326fe8c8ce4e369ffb8c
|
[
"MIT"
] | null | null | null |
import os
import sys
import unittest
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src")
class TestCase(unittest.TestCase):
# maxDiff = None
pass
| 17.7
| 72
| 0.711864
| 24
| 177
| 5.083333
| 0.625
| 0.098361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146893
| 177
| 9
| 73
| 19.666667
| 0.807947
| 0.079096
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.5
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
4979d0500ce951c837cd4763693707ee56ef2887
| 407
|
py
|
Python
|
mysite/polls/views.py
|
cs-fullstack-fall-2018/django-intro1-psanon19
|
0ae36780fd664313a011e7a219bc401b158fe93f
|
[
"Apache-2.0"
] | null | null | null |
mysite/polls/views.py
|
cs-fullstack-fall-2018/django-intro1-psanon19
|
0ae36780fd664313a011e7a219bc401b158fe93f
|
[
"Apache-2.0"
] | null | null | null |
mysite/polls/views.py
|
cs-fullstack-fall-2018/django-intro1-psanon19
|
0ae36780fd664313a011e7a219bc401b158fe93f
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse
def nothing(request):
return HttpResponse("This is a bad request. Use one of the other routes (language, system, or ide)")
def language(request):
return HttpResponse("My favorite Language is Javascript")
def system(request):
return HttpResponse("My favorite system is Linux")
def ide(request):
return HttpResponse("My favorite IDE is Intellij")
| 21.421053
| 104
| 0.742015
| 55
| 407
| 5.490909
| 0.509091
| 0.172185
| 0.331126
| 0.268212
| 0.347682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174447
| 407
| 18
| 105
| 22.611111
| 0.89881
| 0
| 0
| 0
| 0
| 0
| 0.406404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0.444444
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b8e323f87bca33af117b9718cfb1d454532bb028
| 3,865
|
py
|
Python
|
tests/test_bpe.py
|
dpressel/vecxx
|
19f8285b7d0e8f37701bcc0ae8f6a45a58e324ca
|
[
"Apache-2.0"
] | 2
|
2021-05-17T14:05:35.000Z
|
2021-06-29T18:43:11.000Z
|
tests/test_bpe.py
|
tzellman/vecxx
|
58829f261f6bc9a939f5fef161af8d36a75555c3
|
[
"Apache-2.0"
] | 6
|
2021-05-19T18:14:59.000Z
|
2021-06-16T14:57:15.000Z
|
tests/test_bpe.py
|
tzellman/vecxx
|
58829f261f6bc9a939f5fef161af8d36a75555c3
|
[
"Apache-2.0"
] | 1
|
2021-05-17T14:05:22.000Z
|
2021-05-17T14:05:22.000Z
|
import os
import pytest
import numpy as np
from vecxx import *
TEST_DATA = os.path.join(os.path.realpath(os.path.dirname(__file__)), "test_data")
TEST_SENTENCE = "My name is Dan . I am from Ann Arbor , Michigan , in Washtenaw County"
TEST_SENTENCE_GOLD = "<GO> my name is dan . i am from ann ar@@ bor , michigan , in wash@@ ten@@ aw county <EOS>"
TEST_IDS_GOLD = [1, 30, 265, 14, 2566, 5, 8, 158, 63, 10940, 525, 18637, 7, 3685, 7, 18, 14242, 1685, 2997, 4719, 2]
TEST_N_SENTENCES = ["My name is Dan .", "I am from Ann Arbor , Michigan .", "in Washtenaw County"]
TEST_N_IDS_GOLD = [
[1, 30, 265, 14, 2566, 5, 2],
[1, 8, 158, 63, 10940, 525, 18637, 7, 3685, 5, 2],
[1, 18, 14242, 1685, 2997, 4719, 2]
]
def test_pieces():
bpe = BPEVocab(
vocab_file=os.path.join(TEST_DATA, "vocab.30k"),
codes_file=os.path.join(TEST_DATA, "codes.30k")
)
vec = VocabVectorizer(bpe, transform=str.lower, emit_begin_tok=["<GO>"], emit_end_tok=["<EOS>"])
sentence = ' '.join(vec.convert_to_pieces(TEST_SENTENCE.split()))
assert sentence == TEST_SENTENCE_GOLD
def test_pieces_map():
bpe = BPEVocab(
vocab_file=os.path.join(TEST_DATA, "vocab.30k"),
codes_file=os.path.join(TEST_DATA, "codes.30k")
)
vec = VocabMapVectorizer(bpe, transform=str.lower, emit_begin_tok=["<GO>"], emit_end_tok=["<EOS>"])
map_tokens = [{"text": s} for s in TEST_SENTENCE.split()]
sentence = ' '.join(vec.convert_to_pieces(map_tokens))
assert sentence == TEST_SENTENCE_GOLD
def test_bpe_lookup():
bpe = BPEVocab(
vocab_file=os.path.join(TEST_DATA, "vocab.30k"),
codes_file=os.path.join(TEST_DATA, "codes.30k")
)
toks = TEST_SENTENCE_GOLD.split()
ids = [bpe.lookup(s, str.lower) for s in toks]
assert ids == TEST_IDS_GOLD
def test_ids():
bpe = BPEVocab(
vocab_file=os.path.join(TEST_DATA, "vocab.30k"),
codes_file=os.path.join(TEST_DATA, "codes.30k")
)
vec = VocabVectorizer(bpe, transform=str.lower, emit_begin_tok=["<GO>"], emit_end_tok=["<EOS>"])
v, l = vec.convert_to_ids(TEST_SENTENCE.split())
assert v == TEST_IDS_GOLD
assert l == len(TEST_IDS_GOLD)
v, l = vec.convert_to_ids(TEST_SENTENCE.split(), 128)
assert v[:l] == TEST_IDS_GOLD
assert np.sum(v[l+1:]) == 0
assert l == len(TEST_IDS_GOLD)
v, l = vec.convert_to_ids(TEST_SENTENCE.split(), 5)
assert v == TEST_IDS_GOLD[:5]
assert l == 5
def test_ids_stack():
bpe = BPEVocab(
vocab_file=os.path.join(TEST_DATA, "vocab.30k"),
codes_file=os.path.join(TEST_DATA, "codes.30k")
)
vec = VocabVectorizer(bpe, transform=str.lower, emit_begin_tok=["<GO>"], emit_end_tok=["<EOS>"])
nv, nl = vec.convert_to_ids_stack([t.split() for t in TEST_N_SENTENCES], 12)
nv = np.array(nv).reshape((len(TEST_N_SENTENCES), 12))
for v, l, t in zip(nv, nl, TEST_N_IDS_GOLD):
assert len(v[:l]) == len(t)
assert all([a == b for a, b in zip(v[:l], t)])
nv, nl = vec.convert_to_ids_stack([t.split() for t in TEST_N_SENTENCES], 5)
nv = np.array(nv).reshape((len(TEST_N_SENTENCES), 5))
for v, l, t in zip(nv, nl, TEST_N_IDS_GOLD):
assert len(v[:l]) == 5
assert all([a == b for a, b in zip(v[:l], t[:5])])
def test_ids_map():
bpe = BPEVocab(
vocab_file=os.path.join(TEST_DATA, "vocab.30k"),
codes_file=os.path.join(TEST_DATA, "codes.30k")
)
vec = VocabMapVectorizer(bpe, transform=str.lower, emit_begin_tok=["<GO>"], emit_end_tok=["<EOS>"])
map_tokens = [{"text": s} for s in TEST_SENTENCE.split()]
v, l = vec.convert_to_ids(map_tokens)
assert v == TEST_IDS_GOLD
assert l == len(TEST_IDS_GOLD)
v, l = vec.convert_to_ids(map_tokens, 128)
assert v[:l] == TEST_IDS_GOLD
assert np.sum(v[l+1:]) == 0
assert l == len(TEST_IDS_GOLD)
| 37.892157
| 116
| 0.63881
| 636
| 3,865
| 3.660377
| 0.163522
| 0.03866
| 0.055842
| 0.072165
| 0.837629
| 0.829897
| 0.786942
| 0.755155
| 0.693299
| 0.648625
| 0
| 0.053295
| 0.198965
| 3,865
| 101
| 117
| 38.267327
| 0.698643
| 0
| 0
| 0.458824
| 0
| 0.011765
| 0.102743
| 0
| 0
| 0
| 0
| 0
| 0.223529
| 1
| 0.070588
| false
| 0
| 0.047059
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b8eea482f88b23111bdbe4b4858d9c50f9464845
| 19,780
|
py
|
Python
|
child_management/migrations/0021_auto_20210805_1913.py
|
waicindia/clms-prototype
|
8c32c440ca8a132e9fc70a3d94f27333f957a4f3
|
[
"MIT"
] | null | null | null |
child_management/migrations/0021_auto_20210805_1913.py
|
waicindia/clms-prototype
|
8c32c440ca8a132e9fc70a3d94f27333f957a4f3
|
[
"MIT"
] | null | null | null |
child_management/migrations/0021_auto_20210805_1913.py
|
waicindia/clms-prototype
|
8c32c440ca8a132e9fc70a3d94f27333f957a4f3
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-08-05 19:13
from django.db import migrations
class Migration(migrations.Migration):
    """Drop and re-create the two child reporting SQL views.

    rep_child_baseline_report: one row per child with shelter-home,
    guardian, CWC-review and adoption-inquiry details.
    rep_child_details_view: one row per *flagged* child (flagged_status = 1)
    with stay duration and review-age breakdowns.

    NOTE(review): the sex-code mapping differs between the two views
    (baseline: 3=Intersex, 4=Transgender; details: 3=Transgender,
    4=Intersex) — one of them is presumably wrong; confirm against the
    model's sex choices.
    """

    dependencies = [
        ('child_management', '0020_auto_20210805_1911'),
    ]

    operations = [
        # Recreate the per-child baseline report view.
        migrations.RunSQL('drop view if exists rep_child_baseline_report'),
        migrations.RunSQL("""create or replace view rep_child_baseline_report as
SELECT mds.id AS state_id,
mds.name AS state_name,
mdd.id AS district_id,
mdd.name AS district_name,
sh.id AS shelter_home_id,
sh.name AS shelter_home_name,
ch.case_number,
COALESCE(ch.first_name, ''::character varying) AS first_name,
COALESCE(ch.middle_name, ''::character varying) AS middle_name,
COALESCE(ch.last_name, ''::character varying) AS last_name,
CASE
WHEN ch.dob IS NULL THEN ''::text
ELSE to_char(ch.dob::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS dob,
CASE
WHEN ch.sex = 1 THEN 'Male'::text
WHEN ch.sex = 2 THEN 'Female'::text
WHEN ch.sex = 3 THEN 'Intersex'::text
WHEN ch.sex = 4 THEN 'Transgender'::text
WHEN ch.sex = 5 THEN 'Other'::text
ELSE ''::text
END AS gender,
cc.classification,
CASE
WHEN cfh.flagged_status = 1 THEN 'Yes'::text
ELSE 'No'::text
END AS reco_adoption_inquiry,
csh.admission_number,
CASE
WHEN csh.date_of_admission IS NULL THEN ''::text
ELSE to_char(csh.date_of_admission::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS date_of_admission,
cg.name AS guardian_name,
mdr.name AS guardian_relation,
CASE
WHEN fv.most_recent_visit_date IS NULL THEN 'No Visits'::text
ELSE to_char(fv.most_recent_visit_date::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS guardian_most_recent_visit,
CASE
WHEN cch.last_review_date IS NULL THEN ''::text
ELSE to_char(cch.last_review_date::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS last_review_date,
CASE
WHEN ch.cwc_started_the_process_of_declaring IS NULL THEN ''::text
ELSE to_char(ch.cwc_started_the_process_of_declaring::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS cwc_started_adoption_inquiry,
ch.cwc_order_number,
CASE
WHEN ch.date_declaring_child_free_for_adoption IS NULL THEN ''::text
ELSE to_char(ch.date_declaring_child_free_for_adoption::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS date_declaring_child_free_for_adoption,
ch.remarks
FROM child_management_child ch
JOIN ( SELECT row_number() OVER (PARTITION BY child_management_childshelterhomerelation.child_id ORDER BY child_management_childshelterhomerelation.date_of_admission DESC, child_management_childshelterhomerelation.id DESC) AS shelter_num,
child_management_childshelterhomerelation.shelter_home_id,
child_management_childshelterhomerelation.child_id,
child_management_childshelterhomerelation.admission_number,
child_management_childshelterhomerelation.date_of_admission
FROM child_management_childshelterhomerelation
WHERE child_management_childshelterhomerelation.active = 2) csh ON csh.child_id = ch.id AND csh.shelter_num = 1
JOIN master_data_shelterhome sh ON sh.id = csh.shelter_home_id
JOIN master_data_district mdd ON mdd.id = sh.district_id
JOIN master_data_state mds ON mds.id = mdd.state_id
LEFT JOIN ( SELECT x1.child_id,
string_agg(x2.name::text, ', '::text) AS classification
FROM child_management_child_child_classification x1
JOIN master_data_childclassification x2 ON x1.childclassification_id = x2.id AND x2.active = 2
GROUP BY x1.child_id) cc ON ch.id = cc.child_id
LEFT JOIN ( SELECT child_management_guardian.child_id,
row_number() OVER (PARTITION BY child_management_guardian.child_id ORDER BY child_management_guardian.id DESC) AS guardian_num,
child_management_guardian.name,
child_management_guardian.relationship_id
FROM child_management_guardian) cg ON cg.child_id = ch.id AND cg.guardian_num = 1
LEFT JOIN master_data_relationship mdr ON mdr.id = cg.relationship_id
LEFT JOIN ( SELECT child_management_childcwchistory.child_id,
max(child_management_childcwchistory.last_date_of_cwc_order_or_review) AS last_review_date
FROM child_management_childcwchistory
GROUP BY child_management_childcwchistory.child_id) cch ON cch.child_id = ch.id
LEFT JOIN ( SELECT child_management_childflaggedhistory.child_id,
row_number() OVER (PARTITION BY child_management_childflaggedhistory.child_id ORDER BY child_management_childflaggedhistory.flagged_date DESC, child_management_childflaggedhistory.id DESC) AS flagging_num,
child_management_childflaggedhistory.flagged_status
FROM child_management_childflaggedhistory
WHERE child_management_childflaggedhistory.active = 2) cfh ON cfh.child_id = ch.id AND cfh.flagging_num = 1
LEFT JOIN ( SELECT child_management_familyvisit.child_id,
max(child_management_familyvisit.date_of_visit) AS most_recent_visit_date
FROM child_management_familyvisit
GROUP BY child_management_familyvisit.child_id) fv ON ch.id = fv.child_id"""),
        # Recreate the flagged-children details view (filters flagged_status = 1).
        migrations.RunSQL('drop view if exists rep_child_details_view'),
        migrations.RunSQL("""create or replace view rep_child_details_view as
SELECT concat(COALESCE(ch.first_name, ''::character varying), ' ', COALESCE(ch.middle_name, ' '::character varying), ' ', COALESCE(ch.last_name, ''::character varying)) AS child_name,
ch.case_number,
CASE
WHEN ch.dob IS NULL THEN ''::text
ELSE to_char(ch.dob::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS dob,
date_part('year'::text, age(now()::timestamp without time zone, ch.dob::timestamp without time zone)) AS age_year,
date_part('month'::text, age(now()::timestamp without time zone, ch.dob::timestamp without time zone)) AS age_plus_months,
CASE
WHEN ch.sex = 1 THEN 'Male'::text
WHEN ch.sex = 2 THEN 'Female'::text
WHEN ch.sex = 3 THEN 'Transgender'::text
WHEN ch.sex = 4 THEN 'Intersex'::text
WHEN ch.sex = 5 THEN 'Other'::text
ELSE ''::text
END AS gender,
CASE
WHEN fh.flagged_date IS NULL THEN 'NA'::text
ELSE to_char(fh.flagged_date::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS date_flagged_for_adpotion_inquiry,
CASE
WHEN fh.flagged_date IS NULL THEN '-1'::integer::double precision
ELSE date_part('year'::text, age(now()::timestamp without time zone, fh.flagged_date::timestamp without time zone))
END AS adoption_inquiry_pending_years,
CASE
WHEN fh.flagged_date IS NULL THEN '-1'::integer::double precision
ELSE date_part('month'::text, age(now()::timestamp without time zone, fh.flagged_date::timestamp without time zone))
END AS adoption_inquiry_pending_months,
CASE
WHEN fv.most_recent_visit_date IS NOT NULL THEN to_char(fv.most_recent_visit_date::timestamp with time zone, 'DD-MM-YYYY'::text)
ELSE 'No Family Visits'::text
END AS last_family_visit,
CASE
WHEN cg.child_id IS NULL THEN 'No'::text
ELSE 'Yes'::text
END AS guardian_listed,
cc.classification,
CASE
WHEN cs.stay_in_months IS NULL THEN 'NA'::text
WHEN cs.stay_in_months = 0::double precision AND cs.additional_days < 30::double precision THEN '< 1 month'::text
ELSE ((
CASE
WHEN (cs.stay_in_months + (cs.additional_days / 30::double precision)::integer::double precision) >= 12::double precision AND (cs.stay_in_months + (cs.additional_days / 30::double precision)::integer::double precision) < 24::double precision THEN floor(((cs.stay_in_months + floor(cs.additional_days / 30::double precision)::numeric::integer::double precision) / 12::double precision)::numeric) || ' year and '::text
WHEN (cs.stay_in_months + (cs.additional_days / 30::double precision)::integer::double precision) >= 24::double precision THEN floor(((cs.stay_in_months + floor((cs.additional_days / 30::double precision)::numeric)::integer::double precision) / 12::double precision)::numeric) || ' years and '::text
ELSE ''::text
END || ((cs.stay_in_months + floor((cs.additional_days / 30::double precision)::numeric)::integer::double precision)::integer % 12)) || ' month'::text) ||
CASE
WHEN ((cs.stay_in_months + floor((cs.additional_days / 30::double precision)::numeric)::integer::double precision)::integer % 12) > 1 THEN 's'::text
ELSE ''::text
END
END AS total_shelter_home_stay,
CASE
WHEN cr.num_months_last_review IS NULL THEN 'NA'::text
WHEN cr.num_months_last_review = 0::double precision THEN '< 1 month'::text
WHEN cr.num_months_last_review > 0::double precision THEN ((
CASE
WHEN cr.num_months_last_review >= 12::double precision AND cr.num_months_last_review < 24::double precision THEN floor((cr.num_months_last_review / 12::double precision)::numeric) || ' year and '::text
WHEN cr.num_months_last_review >= 24::double precision THEN floor((cr.num_months_last_review / 12::double precision)::numeric) || ' years and '::text
ELSE ''::text
END || (cr.num_months_last_review::integer % 12)) || ' month'::text) ||
CASE
WHEN (cr.num_months_last_review::integer % 12) > 1 THEN 's'::text
ELSE ''::text
END
ELSE NULL::text
END AS last_cwc_review_duration,
fh.flagging_reason,
CASE
WHEN csh.date_of_admission IS NULL THEN 'NA'::text
ELSE to_char(csh.date_of_admission::timestamp with time zone, 'DD-MM-YYYY'::text)
END AS date_of_admission,
csh.admission_number,
sh.name AS shelter_home_name,
sh.id AS shelter_home_id,
mdd.name AS district_name,
mdd.id AS district_id,
mds.name AS state_name,
mds.id AS state_id,
ch.remarks
FROM child_management_child ch
JOIN ( SELECT row_number() OVER (PARTITION BY child_management_childshelterhomerelation.child_id ORDER BY child_management_childshelterhomerelation.date_of_admission DESC, child_management_childshelterhomerelation.id DESC) AS shelter_num,
child_management_childshelterhomerelation.shelter_home_id,
child_management_childshelterhomerelation.child_id,
child_management_childshelterhomerelation.admission_number,
child_management_childshelterhomerelation.date_of_admission
FROM child_management_childshelterhomerelation
WHERE child_management_childshelterhomerelation.active = 2) csh ON csh.child_id = ch.id AND csh.shelter_num = 1
JOIN master_data_shelterhome sh ON sh.id = csh.shelter_home_id
JOIN master_data_district mdd ON mdd.id = sh.district_id
JOIN master_data_state mds ON mds.id = mdd.state_id
LEFT JOIN ( SELECT child_management_familyvisit.child_id,
max(child_management_familyvisit.date_of_visit) AS most_recent_visit_date
FROM child_management_familyvisit
GROUP BY child_management_familyvisit.child_id) fv ON ch.id = fv.child_id
LEFT JOIN ( SELECT DISTINCT child_management_guardian.child_id
FROM child_management_guardian
WHERE child_management_guardian.active = 2) cg ON ch.id = cg.child_id
LEFT JOIN ( SELECT x1.child_id,
string_agg(x2.name::text, ', '::text) AS classification
FROM child_management_child_child_classification x1
JOIN master_data_childclassification x2 ON x1.childclassification_id = x2.id AND x2.active = 2
GROUP BY x1.child_id) cc ON ch.id = cc.child_id
LEFT JOIN ( SELECT dash_child_cci_stay_view.child_id,
COALESCE(sum(dash_child_cci_stay_view.stay_in_months), 0::double precision) AS stay_in_months,
COALESCE(sum(dash_child_cci_stay_view.additional_days), 0::double precision) AS additional_days
FROM dash_child_cci_stay_view
GROUP BY dash_child_cci_stay_view.child_id) cs ON cs.child_id = ch.id
LEFT JOIN dash_child_days_lastreview_view cr ON cr.child_id = ch.id
LEFT JOIN ( SELECT child_management_childflaggedhistory.child_id,
row_number() OVER (PARTITION BY child_management_childflaggedhistory.child_id ORDER BY child_management_childflaggedhistory.flagged_date DESC, child_management_childflaggedhistory.id DESC) AS flag_num,
child_management_childflaggedhistory.reason_for_flagging AS flagging_reason,
child_management_childflaggedhistory.flagged_date,
child_management_childflaggedhistory.flagged_status
FROM child_management_childflaggedhistory) fh ON fh.child_id = ch.id AND fh.flag_num = 1
WHERE fh.flagged_status = 1"""),
    ]
| 89.909091
| 464
| 0.470627
| 1,821
| 19,780
| 4.844042
| 0.102142
| 0.100329
| 0.079583
| 0.023807
| 0.828024
| 0.7905
| 0.710804
| 0.655708
| 0.611155
| 0.572724
| 0
| 0.012018
| 0.474166
| 19,780
| 219
| 465
| 90.319635
| 0.836073
| 0.002275
| 0
| 0.516432
| 1
| 0.13615
| 0.985811
| 0.254852
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004695
| 0
| 0.018779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
770cc3692193f9bef84a8136c095505733e2de22
| 131
|
py
|
Python
|
cellx/utils.py
|
nthndy/cellx
|
56a22099beeba59401d6882b6d6b0010718c0376
|
[
"MIT"
] | 3
|
2020-10-26T12:24:49.000Z
|
2021-08-09T18:29:48.000Z
|
cellx/utils.py
|
nthndy/cellx
|
56a22099beeba59401d6882b6d6b0010718c0376
|
[
"MIT"
] | 36
|
2020-10-26T12:21:17.000Z
|
2022-03-11T09:20:51.000Z
|
cellx/utils.py
|
nthndy/cellx
|
56a22099beeba59401d6882b6d6b0010718c0376
|
[
"MIT"
] | 6
|
2020-07-27T21:33:55.000Z
|
2021-03-15T17:17:21.000Z
|
import enum
class CallableEnum(enum.Enum):
    """Enum whose members wrap callables and can be invoked directly.

    Each member's ``value`` is expected to be a callable; invoking the
    member forwards the argument, so ``Member(x)`` behaves exactly like
    ``Member.value(x)``.
    """

    def __call__(self, x):
        # Delegate the call to the callable stored as this member's value.
        target = self.value
        return target(x)
| 14.555556
| 30
| 0.641221
| 16
| 131
| 5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229008
| 131
| 8
| 31
| 16.375
| 0.792079
| 0.137405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
77207882b4cbd81ede56905cc1825dd5ae33ea93
| 167
|
py
|
Python
|
tests/data/test.py
|
kokkonisd/locstats
|
6efe924d254a6257ab0bc9a5ed9d7b573e30f570
|
[
"MIT"
] | 5
|
2019-09-07T21:27:30.000Z
|
2022-02-06T18:01:05.000Z
|
locstats/tests/dummy_data/test.py
|
thanasispe/locstats
|
91ca3cce69810bbd6ed2a882a96f13f6c09fce8f
|
[
"MIT"
] | 12
|
2019-08-21T10:33:30.000Z
|
2021-12-09T22:49:23.000Z
|
locstats/tests/dummy_data/test.py
|
thanasispe/locstats
|
91ca3cce69810bbd6ed2a882a96f13f6c09fce8f
|
[
"MIT"
] | 5
|
2019-08-22T00:17:42.000Z
|
2022-02-06T18:03:39.000Z
|
#!/usr/bin/env python3
'''
This is a
multiline
comment
'''
# NOTE(review): this file is fixture data for a LOC/comment-counting tool
# (lives under tests/dummy_data) — the mix of a docstring, inline comment
# and quote-lookalikes appears deliberate; presumably the consuming test
# asserts exact counts, so do not reformat. TODO confirm against that test.
print("This is some dummy code") # Hi # This shouldn't count as another comment '''neither should this'''
| 16.7
| 105
| 0.688623
| 26
| 167
| 4.423077
| 0.807692
| 0.104348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.167665
| 167
| 9
| 106
| 18.555556
| 0.820144
| 0.712575
| 0
| 0
| 0
| 0
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
621e678b97fbd14b932d3173e097005296018e1b
| 84
|
py
|
Python
|
python/testData/refactoring/move/importSlash/after/src/tmp.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/move/importSlash/after/src/tmp.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/move/importSlash/after/src/tmp.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from file1 import function_1
from file2 import function_2

# NOTE(review): refactoring-test fixture (move-import test data) — the two
# calls simply exercise both imported symbols; keep call order as-is.
function_1()
function_2()
| 16.8
| 28
| 0.833333
| 14
| 84
| 4.714286
| 0.5
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 0.119048
| 84
| 5
| 29
| 16.8
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
626a5007a75b352c971b8def571b144a1b3fa23c
| 804
|
py
|
Python
|
pymc4/distributions/tensorflow/transforms.py
|
byblian/pymc4
|
5de890ed7f22de878eb48c92d3e9b8fe87c25e61
|
[
"Apache-2.0"
] | null | null | null |
pymc4/distributions/tensorflow/transforms.py
|
byblian/pymc4
|
5de890ed7f22de878eb48c92d3e9b8fe87c25e61
|
[
"Apache-2.0"
] | null | null | null |
pymc4/distributions/tensorflow/transforms.py
|
byblian/pymc4
|
5de890ed7f22de878eb48c92d3e9b8fe87c25e61
|
[
"Apache-2.0"
] | null | null | null |
from pymc4.distributions import abstract
from tensorflow_probability import bijectors as tfb
__all__ = ["Log"]
class Log(abstract.transforms.Log):
    """Log transform backed by a TensorFlow Probability ``Exp`` bijector.

    Because the backend bijector is ``Exp``, this transform's forward
    direction maps to the bijector's *inverse* and vice versa.
    """

    def __init__(self):
        # NOTE: We actually need the inverse to match PyMC3, do we?
        self._backend_transform = tfb.Exp()

    def forward(self, x):
        """Apply the log direction (the bijector's inverse)."""
        bij = self._backend_transform
        return bij.inverse(x)

    def inverse(self, z):
        """Apply the exp direction (the bijector's forward)."""
        bij = self._backend_transform
        return bij.forward(z)

    def forward_log_det_jacobian(self, x):
        """Log|det J| for the forward direction, using inverse event ndims."""
        bij = self._backend_transform
        return bij.inverse_log_det_jacobian(x, bij.inverse_min_event_ndims)

    def inverse_log_det_jacobian(self, z):
        """Log|det J| for the inverse direction, using forward event ndims."""
        bij = self._backend_transform
        return bij.forward_log_det_jacobian(z, bij.forward_min_event_ndims)
| 29.777778
| 67
| 0.708955
| 104
| 804
| 5.086538
| 0.375
| 0.145558
| 0.26465
| 0.196597
| 0.287335
| 0.287335
| 0.287335
| 0
| 0
| 0
| 0
| 0.00317
| 0.215174
| 804
| 26
| 68
| 30.923077
| 0.835182
| 0.070896
| 0
| 0
| 0
| 0
| 0.004027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.277778
| false
| 0
| 0.111111
| 0.222222
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6564e4f23d5fbedc9143b410281a0f450654d5b0
| 65
|
py
|
Python
|
test.py
|
james-salafatinos/finviz_news_scraper
|
7fa60e3a03f0fe7b5d10ee61fbb09875da7e23ae
|
[
"MIT"
] | 5
|
2020-12-12T15:46:14.000Z
|
2021-11-15T09:54:40.000Z
|
test.py
|
james-salafatinos/finviz_news_scraper
|
7fa60e3a03f0fe7b5d10ee61fbb09875da7e23ae
|
[
"MIT"
] | null | null | null |
test.py
|
james-salafatinos/finviz_news_scraper
|
7fa60e3a03f0fe7b5d10ee61fbb09875da7e23ae
|
[
"MIT"
] | null | null | null |
import pandas as pd

# Load the pickled scrape snapshot for 2020-11-22 and dump it to stdout.
snapshot = pd.read_pickle('data/obj/2020-11-22')
print(snapshot)
| 21.666667
| 44
| 0.753846
| 13
| 65
| 3.692308
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.076923
| 65
| 3
| 44
| 21.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.287879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
657a6a667e87ea211f7947225f22ef3730eeecdd
| 43
|
py
|
Python
|
chapter-06/sample002.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
chapter-06/sample002.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
chapter-06/sample002.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
# Inspect the built-in namespace: list every builtin name, then print
# the interactive help text for the builtins module.
print(dir(__builtins__))
help(__builtins__)
| 21.5
| 24
| 0.837209
| 5
| 43
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 43
| 2
| 25
| 21.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
659128e3f4502196d0d87a44be7052d614dd035f
| 145
|
py
|
Python
|
checkov/terraform/tag_providers/azure.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 3
|
2021-04-19T17:17:21.000Z
|
2021-09-06T06:31:09.000Z
|
checkov/terraform/tag_providers/azure.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 16
|
2021-03-09T07:38:38.000Z
|
2021-06-09T03:53:55.000Z
|
checkov/terraform/tag_providers/azure.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 1
|
2021-03-07T07:23:39.000Z
|
2021-03-07T07:23:39.000Z
|
from checkov.common.util.type_forcers import force_dict
def get_resource_tags(entity_config):
    """Return the resource's ``tags`` entry coerced to a dict via ``force_dict``."""
    raw_tags = entity_config.get('tags')
    return force_dict(raw_tags)
| 24.166667
| 55
| 0.813793
| 22
| 145
| 5.045455
| 0.727273
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096552
| 145
| 5
| 56
| 29
| 0.847328
| 0
| 0
| 0
| 0
| 0
| 0.027586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
659f3214377154e8f5948452ff384f7d68e26431
| 129
|
py
|
Python
|
models/__init__.py
|
cjliux/mdst.c2f
|
5617624b25ddaa11ffbc07401d3fe0276ca220d5
|
[
"BSD-3-Clause"
] | 2
|
2020-07-17T12:12:35.000Z
|
2020-09-12T14:28:55.000Z
|
models/__init__.py
|
cjliux/mdst.c2f
|
5617624b25ddaa11ffbc07401d3fe0276ca220d5
|
[
"BSD-3-Clause"
] | null | null | null |
models/__init__.py
|
cjliux/mdst.c2f
|
5617624b25ddaa11ffbc07401d3fe0276ca220d5
|
[
"BSD-3-Clause"
] | null | null | null |
from .AutoBase import AutoBase
from .TRADE import TRADE
from .C2F_A import C2F_A
from .C2F_A2 import C2F_A2
from .ONT import ONT
| 21.5
| 30
| 0.806202
| 24
| 129
| 4.166667
| 0.333333
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055046
| 0.155039
| 129
| 5
| 31
| 25.8
| 0.862385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
65e3d64eb173efee0ca82d08ff3c01c05bd631c2
| 54
|
py
|
Python
|
pytaxa/examples/__init__.py
|
sckott/pytaxa
|
ea9f47dfbb3bf5bba53d82eb2bc7116051af87fb
|
[
"MIT"
] | 9
|
2018-06-14T23:32:01.000Z
|
2019-09-29T00:42:59.000Z
|
pytaxa/examples/__init__.py
|
sckott/pytaxa
|
ea9f47dfbb3bf5bba53d82eb2bc7116051af87fb
|
[
"MIT"
] | 16
|
2018-06-26T21:43:30.000Z
|
2018-07-07T01:18:04.000Z
|
pytaxa/examples/__init__.py
|
sckott/pytaxa
|
ea9f47dfbb3bf5bba53d82eb2bc7116051af87fb
|
[
"MIT"
] | 1
|
2018-08-05T21:49:11.000Z
|
2018-08-05T21:49:11.000Z
|
# -*- coding: utf-8 -*-
from .eg import eg_hierarchy
| 13.5
| 28
| 0.62963
| 8
| 54
| 4.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.185185
| 54
| 3
| 29
| 18
| 0.727273
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
65e49c157df7ce4f9fbd9ff60ee8e632ffa259a4
| 302
|
py
|
Python
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/requests_oauthlib/compliance_fixes/__init__.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/requests_oauthlib/compliance_fixes/__init__.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/requests_oauthlib/compliance_fixes/__init__.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
from __future__ import absolute_import
from .facebook import facebook_compliance_fix
from .fitbit import fitbit_compliance_fix
from .linkedin import linkedin_compliance_fix
from .slack import slack_compliance_fix
from .mailchimp import mailchimp_compliance_fix
from .weibo import weibo_compliance_fix
| 33.555556
| 47
| 0.884106
| 41
| 302
| 6.097561
| 0.292683
| 0.312
| 0.34
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096026
| 302
| 8
| 48
| 37.75
| 0.915751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0297484b67cabdff00f90fd53883f6fbf711b62a
| 399
|
py
|
Python
|
code/models/__init__.py
|
ShawnCheung/Attention-depth
|
e461f3b107e34ff5740aacfd7c7d7baa6f5e9312
|
[
"MIT"
] | 87
|
2019-01-30T03:06:24.000Z
|
2022-03-30T06:36:49.000Z
|
code/models/__init__.py
|
ShawnCheung/Attention-depth
|
e461f3b107e34ff5740aacfd7c7d7baa6f5e9312
|
[
"MIT"
] | 6
|
2019-02-22T08:58:32.000Z
|
2021-05-21T09:28:13.000Z
|
code/models/__init__.py
|
ShawnCheung/Attention-depth
|
e461f3b107e34ff5740aacfd7c7d7baa6f5e9312
|
[
"MIT"
] | 17
|
2019-02-18T08:49:34.000Z
|
2022-01-31T10:30:58.000Z
|
from .model import ResNet
from .sadecoder import SADecoder
from .losses import OrdinalRegression2d, CrossEntropy2d, OhemCrossEntropy2d, AttentionLoss2d
from .get_network import create_network
from .get_lossfunc import create_lossfunc
__all__ = ['ResNet', 'SADecoder', 'create_network', 'create_lossfunc',
'OrdinalRegression2d', 'CrossEntropy2d', 'OhemCrossEntropy2d', 'AttentionLoss2d']
| 44.333333
| 92
| 0.802005
| 38
| 399
| 8.157895
| 0.394737
| 0.212903
| 0.329032
| 0.425806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022599
| 0.112782
| 399
| 8
| 93
| 49.875
| 0.853107
| 0
| 0
| 0
| 0
| 0
| 0.275689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.714286
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
02cc0675aef3a40a273586d92e3b45bda5dff9c7
| 87,974
|
py
|
Python
|
tools/nntool/quantization/symmetric/kernels/rnn.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | 118
|
2018-05-22T08:45:59.000Z
|
2022-03-30T07:00:45.000Z
|
tools/nntool/quantization/symmetric/kernels/rnn.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | 213
|
2018-07-25T02:37:32.000Z
|
2022-03-30T18:04:01.000Z
|
tools/nntool/quantization/symmetric/kernels/rnn.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | 76
|
2018-07-04T08:19:27.000Z
|
2022-03-24T09:58:05.000Z
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import math
from typing import Mapping
import numpy as np
from graph.types import LSTMParameters, RNNParameters
from graph.types.rnn import GRUParameters
from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type
from quantization.new_qrec import QRec
from quantization.qtype import QType
from utils.at_norm import at_norm
from utils.diag_collector import DiagCollector
from utils.sigmoid_tanh_lut import sigmoid_lut, tanh_lut
LOG = logging.getLogger("nntool." + __name__)
# for debugging this can be switched to np.in64
INT_DTYPE = np.int32
# Another TANH and SIGMOID approx -> less precise
# def exp_taylor_quant(x, qtype, order='third'):
# ONE_OVER_3 = qtype.quantize(np.array([1.0 / 3.0]))
# ONE = qtype.quantize(np.array([1]))
# x2 = (x.astype(np.int32)*x) >> qtype.q
# x3 = (x2*x) >> qtype.q
# if order == 'third':
# x3_over_6_plus_x2_over_2 = (((x3 * ONE_OVER_3) >> qtype.q) + x2) >> 1
# return ONE + ((ONE * (x + x3_over_6_plus_x2_over_2)) >> qtype.q)
# x4 = (x3*x) >> qtype.q
# if order == 'fourth':
# x4_over_4 = x4>>2
# x4_over_24_plus_x3_over_6_plus_x2_over_2 = ((((x4_over_4 + x3) * ONE_OVER_3) >> qtype.q) + x2) >> 1
# return ONE + ((ONE * (x + x4_over_24_plus_x3_over_6_plus_x2_over_2)) >> qtype.q)
# def quant_tanh(x, qtype, k=3):
# K = qtype.quantize(np.array([k])).astype(np.int32)
# ONE = qtype.quantize(np.array([1])).astype(np.int32)
# result_neg = ((ONE-exp_taylor_quant(-2*x, qtype).astype(np.int32)).astype(np.int32)<<qtype.q)//(ONE+exp_taylor_quant(-2*x, qtype))
# result_pos = ((ONE-exp_taylor_quant(2*x, qtype).astype(np.int32)).astype(np.int32)<<qtype.q)//(ONE+exp_taylor_quant(2*x, qtype))
# return np.where(x<(-K), -ONE, np.where(x>K, ONE, np.where(x<0, result_neg, -result_pos)))
# def quant_sigmoid(x, qtype):
# ONE = qtype.quantize(np.array([1])).astype(np.int32)
# return np.where(x>0, (exp_taylor_quant(x, qtype) << qtype.q) // (ONE + exp_taylor_quant(x, qtype)),
# (ONE << qtype.q) // (ONE + exp_taylor_quant(-x, qtype)))
def abs_clip(arr: np.ndarray, abs_limit):
    """Clamp *arr* elementwise into the symmetric range [-abs_limit, abs_limit]."""
    lower, upper = -abs_limit, abs_limit
    return np.clip(arr, lower, upper)
def relu(x, qtype):
    """Rectified linear unit: elementwise ``max(x, 0)``.

    Bug fix: the original returned ``np.minimum(x, 0)``, which zeroes
    every *positive* activation and passes negatives through — the exact
    inverse of ReLU.

    Args:
        x: activation values (numpy array or scalar).
        qtype: unused; kept only for signature parity with the other
            activation functions dispatched by ``get_activation``.
    """
    del qtype
    return np.maximum(x, 0)
def sigmoid(x, qtype):
    """Numerically stable logistic sigmoid on dequantized values.

    Dequantizes *x*, evaluates sigmoid without overflowing ``exp`` for
    large-magnitude inputs (separate branches for negative and
    non-negative values), then requantizes the result.
    """
    real = qtype.dequantize(x)
    negative = real < 0
    nonneg = real >= 0
    exp_term = np.zeros_like(real)
    exp_term[nonneg] = np.exp(-real[nonneg])
    exp_term[negative] = np.exp(real[negative])
    numerator = np.ones_like(real)
    numerator[negative] = exp_term[negative]
    return qtype.quantize(numerator / (1 + exp_term))
def hsigmoid(x, qtype):
    """Hard-sigmoid approximation computed in the quantized integer domain.

    Computes relu6(x + 3) * (1/6) with every constant quantized through
    *qtype*, then rounds by adding half an LSB before the final right
    shift.

    NOTE(review): assumes ``qtype.quantize`` returns integer codes and
    ``qtype.q`` is the number of fractional bits — confirm against QType.
    """
    x = x.astype(np.int32)
    # relu6(x + 3): clamp x + quantize(3) into [0, quantize(6)]
    relued = np.maximum(0,
                        np.minimum(qtype.quantize(np.array([3])) + x,
                                   qtype.quantize(np.array([6]))))
    # multiply by fixed-point 1/6
    relued *= qtype.quantize(np.array(1/6))
    # add half an LSB, then drop the q fractional bits (round-to-nearest)
    relued += (1 << (qtype.q - 1))
    relued >>= qtype.q
    return relued
def mean_stddev_normalization(arr: np.ndarray):
    """Normalize *arr* to zero mean and (approximately) unit variance.

    Bug fix: ``arr.size`` is a numpy *attribute*, not a method — the
    original ``arr.size()`` raised ``TypeError: 'int' object is not
    callable`` on every invocation. The 1e-8 epsilon guards against
    division by zero for constant inputs.
    """
    mean = np.mean(arr)
    variance = np.sum(np.square(arr - mean)) / arr.size
    stddev_inv = 1.0 / np.sqrt(variance + 1e-8)
    return (arr - mean) * stddev_inv
def htanh(x, qtype):
    """Hard tanh: clamp *x* into [quantize(-1), quantize(1)] elementwise."""
    lower = qtype.quantize(np.array([-1]))
    upper = qtype.quantize(np.array([1]))
    return np.minimum(np.maximum(x, lower), upper)
def tanh(x, qtype):
    """Tanh evaluated on dequantized values, result requantized."""
    real = qtype.dequantize(x)
    return qtype.quantize(np.tanh(real))
def clip_and_execute(act_fn):
    """Wrap *act_fn* so its input is clipped to the signed 18-bit range.

    The wrapper clamps values into [-2**17, 2**17 - 1] and casts to
    int32 before delegating, keeping inputs inside the LUT domain.
    """
    limit = math.pow(2, 17)

    def fn(val, qtype):
        clipped = np.clip(val, -limit, limit - 1).astype(np.int32)
        return act_fn(clipped, qtype)
    return fn
def get_activation(name, use_hard):
    """Resolve an activation kernel function by name.

    Args:
        name: one of 'relu', 'sigmoid', 'tanh'.
        use_hard: when True pick the piecewise-linear "hard" variant;
            otherwise the clipped LUT-based variant (sigmoid/tanh only).

    Raises:
        NotImplementedError: for any unrecognized *name*.
    """
    if name == 'relu':
        return relu
    if name == 'sigmoid':
        if use_hard:
            return hsigmoid
        return clip_and_execute(sigmoid_lut)
    if name == 'tanh':
        if use_hard:
            return htanh
        return clip_and_execute(tanh_lut)
    raise NotImplementedError("This activation is not implemented")
class RnnSymmetricMixin():
    """Shared symmetric-quantized execution loop for RNN-family kernels.

    Subclasses supply ``step_kernel`` (one recurrent step); this mixin
    drives it over all cells, handles reversed sequences, and collects
    the last ``n_output_cells`` results.
    """

    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        # Extra kwargs are accepted for interface parity but unused.
        del kwargs
        in_tensor = qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")[0]
        # Map each named weight/bias input (index 0 is the data input) to
        # its tensor and quantization record.
        args = {params.INPUT_NAMES[idx]: [in_tensors[idx], qrec.in_qs[idx]]
                for idx in range(1, len(in_tensors))}
        # Reversed (backward-direction) RNN: flip along the time axis.
        if params.revert:
            in_tensor = np.flip(in_tensor, axis=0)
        assert in_tensor.shape[0] == params.n_input_cells, "input shape incorrect - n_input_cells"
        assert in_tensor.shape[1] == params.n_inputs, "input shape incorrect - n_inputs"
        out_tensor = np.zeros(
            [params.n_output_cells, params.n_states], dtype=qrec.out_qs[0].dtype)
        out_idx = 0
        # LSTM steps additionally return an updated cell state.
        new_c_state = None
        for idx in range(params.n_cells):
            if isinstance(params, LSTMParameters):
                res, new_c_state = cls.step_kernel(
                    params, args, idx, in_tensor, qrec)
            else:
                res = cls.step_kernel(params, args, idx, in_tensor, qrec)
            # Only keep outputs for the trailing n_output_cells steps.
            if idx >= (params.n_cells - params.n_output_cells):
                out_tensor[out_idx] = res
                out_idx += 1
        # Undo the time flip so outputs are in caller order.
        if params.revert:
            out_tensor = np.flip(out_tensor, axis=0)
        # Optionally add a leading direction axis.
        if params.output_directions:
            out_tensor = np.expand_dims(out_tensor, 0)
        if new_c_state is not None:
            return [out_tensor, new_c_state]
        return [out_tensor]
def scale_rnn_input(qrec: QRec,
                    weighted_input_tensor: np.ndarray,
                    axis: int,
                    key='i_2_a_q'):
    """Rescale input*input-weights products into the state scale.

    For AT model creation this should not be set; it is only for
    simulation (i.e. input scale == state scale == output scale):
    scales input_scale * input_weights to
    state_scale * recurrent_weights_scale.
    """
    as_int32 = weighted_input_tensor.astype(np.int32)
    scaler = qrec.cache[key]
    return scaler.apply_scales(as_int32, axis)
def scale_rnn_output(qrec,
                     state_tensor: np.ndarray,
                     axis: int):
    """Rescale a state-scale tensor to the output scale and clip it to the output qtype."""
    out_q = qrec.out_qs[0]
    scaled = qrec.cache['s_2_o_q'].apply_scales(state_tensor, axis)
    return out_q.clip(scaled)
def scale_rnn_state(qrec,
                    state_tensor: np.ndarray,
                    axis: int):
    """Rescale state*recurrent-weights products into the internal scale."""
    internal_scaler = qrec.cache['s_2_s_q']
    return internal_scaler.apply_scales(state_tensor, axis)
def weights_zp(weights, zp):
    """Per-row zero-point correction: negated sum of weights * zp along axis 1."""
    products = np.multiply(weights, zp)
    return -np.sum(products, axis=1)
@params_type(RNNParameters)
@qrec_type('scaled')
class RNNSymmetric(RnnSymmetricMixin, KernelBase):
    """Quantized ('scaled') kernel for RNN nodes.

    ``step_kernel`` dispatches on the dtype of the state qtype to one of four
    per-cell implementations covering the supported quantization schemes
    (NE16 u8/u8, NE16 u16/u8, 16/8 and 8/8).  Each step kernel updates
    ``args['i_state'][0]`` in place and returns the new state tensor.

    NOTE(review): the per-method ``params`` annotations originally said
    ``GRUParameters`` (copy/paste from the GRU kernel); this class is
    registered for ``RNNParameters`` so the annotations are corrected here.
    """

    @classmethod
    def step_kernel(cls, params: RNNParameters,
                    args: Mapping[str, np.ndarray],
                    idx: int,
                    input_tensor: np.ndarray,
                    qrec):
        """Run one RNN cell step, dispatching on the state dtype."""
        if args['i_state'][1].dtype == np.uint8:
            return cls.step_kernelu8_u8(params, args, idx, input_tensor, qrec)
        if args['i_state'][1].dtype == np.uint16:
            return cls.step_kernelu16_u8(params, args, idx, input_tensor, qrec)
        if args['i_state'][1].dtype == np.int16:
            return cls.step_kernel16_8(params, args, idx, input_tensor, qrec)
        return cls.step_kernel8_8(params, args, idx, input_tensor, qrec)

    @classmethod
    def step_kernelu8_u8(cls, params: RNNParameters,
                         args: Mapping[str, np.ndarray],
                         idx: int,
                         input_tensor: np.ndarray,
                         qrec):
        """One cell step in the NE16 uint8 input / uint8 state scheme.

        Integer dot products are zero-point corrected as on NE16; the result
        is rescaled to the state scale, activated, then rescaled to the
        unsigned (zero-point offset) output scale.
        """
        scales = qrec.cache['scales']
        # For each cell: compute input_weight * input if there is an input
        if idx < params.n_input_cells:
            # calculate weights zero point * input
            inp_weights = args['i_2_i_w'][0].astype(INT_DTYPE)
            in_t = input_tensor[idx].astype(INT_DTYPE)
            input_gate_scratch = - \
                np.sum(in_t * args['i_2_i_w'][1].zero_point.astype(INT_DTYPE))
            # now calculate gate
            input_gate_scratch = input_gate_scratch + inp_weights.dot(in_t)
            DiagCollector.record(
                'input_in_inputscale', input_gate_scratch, scale=scales['inp_before_scale'], node=params)
            # rescale input contribution to the state scale: scale, add bias, norm
            input_gate_scratch = input_gate_scratch * \
                qrec.cache['i_2_s_q'].qbiases
            input_gate_scratch = input_gate_scratch + \
                args['i_b'][1].attr.interleaved_values[0]
            input_gate_scratch = input_gate_scratch >> qrec.cache['i_2_s_q'].qnorms
            DiagCollector.record(
                'input_in_statescale', input_gate_scratch, scale=scales['inp_after_scale'], node=params)
        # state * state weights
        DiagCollector.record(
            'state', args['i_state'][0], scale=None, node=params)
        DiagCollector.record(
            'state_weights', args['r_2_i_w'][0], scale=None, node=params)
        state_weights = args['r_2_i_w'][0].astype(INT_DTYPE)
        state_t = args['i_state'][0].astype(INT_DTYPE)
        # input_gate_scratch is streamed in subtract calculate weights zero point * state
        # NOTE(review): assumes idx < n_input_cells so input_gate_scratch exists - confirm
        input_gate_scratch_state = input_gate_scratch - \
            np.sum(state_t * args['r_2_i_w'][1].zero_point.astype(INT_DTYPE))
        # Now calculate gate
        input_gate_scratch_state += state_weights.dot(
            args['i_state'][0].astype(INT_DTYPE))
        DiagCollector.record(
            'h_state_post_streamin', input_gate_scratch_state, scale=scales['inp_after_scale'], node=params)
        # scale to state scale
        input_gate_scratch = input_gate_scratch_state * \
            qrec.cache['s_2_s_q'].qbiases
        # biases are added before norm - this includes the state zero point offset
        input_gate_scratch += args['i_b'][0]
        input_gate_scratch = input_gate_scratch >> qrec.cache['s_2_s_q'].qnorms
        DiagCollector.record(
            'h_state_preact', input_gate_scratch, scale=scales['act_input_scale'], node=params)
        # apply activation at state scale
        input_gate_scratch = get_activation(params.activation, params.hard_act)(
            input_gate_scratch, qrec.cache['act_qtype'])
        DiagCollector.record(
            'h_state_prescale', input_gate_scratch, scale=scales['int_scale'], node=params)
        # scale the state scale to the output scale
        o_q = qrec.out_qs[0]
        # scale state_scale to output_scale
        output_gate_scratch = np.maximum(np.minimum(
            qrec.cache['s_2_o_q'].apply_scales(input_gate_scratch, 0), 127), -128)
        # move to unsigned with the output zero point (uint8 wraparound intended)
        output_gate_scratch = output_gate_scratch.astype(
            np.uint8) + o_q.zero_point.astype(np.uint8)
        DiagCollector.record(
            'h_state_out', output_gate_scratch, scale=scales['out_scale'], zero_point=o_q.zero_point.astype(np.uint8), node=params)
        # store the state
        args['i_state'][0] = output_gate_scratch.copy()
        return output_gate_scratch

    @classmethod
    def step_kernelu16_u8(cls, params: RNNParameters,
                          args: Mapping[str, np.ndarray],
                          idx: int,
                          input_tensor: np.ndarray,
                          qrec):
        """One cell step in the NE16 uint16 state / uint8 weight scheme.

        Same structure as the u8 variant but uses at_norm pre-normalization
        around the fixed-point scales and a uint16 output zero point.
        """
        scales = qrec.cache['scales']
        # For each cell: compute input_weight * input if there is an input
        if idx < params.n_input_cells:
            # scale result to recurrent_weight * input_state scale
            inp_weights = args['i_2_i_w'][0].astype(INT_DTYPE)
            in_t = input_tensor[idx].astype(INT_DTYPE)
            # i_zp_b contains the input zero_point offset
            # the weights zp offset is calculated as in NE16
            DiagCollector.record(
                'input_biases', args['i_b'][1].attr.interleaved_values[0], scale=scales['inp_before_scale'], node=params)
            input_gate_scratch = args['i_b'][1].attr.interleaved_values[0].astype(
                INT_DTYPE) - np.sum(in_t * args['i_2_i_w'][1].zero_point.astype(INT_DTYPE))
            DiagCollector.record(
                'input_zero_correction', input_gate_scratch, scale=scales['inp_before_scale'], node=params)
            input_gate_scratch += inp_weights.dot(in_t)
            DiagCollector.record(
                'input_in_inputscale', input_gate_scratch, scale=scales['inp_before_scale'], node=params)
            # pre-normalize, apply the fixed-point scale, then final norm
            input_gate_scratch = at_norm(
                input_gate_scratch, qrec.cache['i_2_s_q'].pre_normalization)
            input_gate_scratch = input_gate_scratch * \
                qrec.cache['i_2_s_q'].qbiases
            input_gate_scratch = at_norm(
                input_gate_scratch, qrec.cache['i_2_s_q'].qnorms)
            DiagCollector.record(
                'input_preact', input_gate_scratch, scale=scales['act_input_scale'], node=params)
        # For each cell: compute recurrent_weight * input_state
        state_weights = args['r_2_i_w'][0].astype(INT_DTYPE)
        DiagCollector.record(
            'state_weights', args['r_2_i_w'][0], scale=None, node=params)
        state_t = args['i_state'][0].astype(INT_DTYPE)
        DiagCollector.record(
            'state', state_t, scale=None, node=params)
        # i_b contains the state zero_point offset + the combined bias in state*weights scale
        # the weights zp offset is calculated as in NE16
        DiagCollector.record(
            'state_biases', args['i_b'][0], scale=scales['inp_after_scale'], node=params)
        input_gate_scratch_state = args['i_b'][0] - np.sum(
            state_t * args['r_2_i_w'][1].zero_point.astype(INT_DTYPE))
        DiagCollector.record(
            'state_zero_correction', input_gate_scratch_state, scale=scales['inp_after_scale'], node=params)
        input_gate_scratch_state += state_weights.dot(state_t)
        DiagCollector.record(
            'state_prod', input_gate_scratch_state, scale=scales['inp_after_scale'], node=params)
        # scale to state scale
        input_gate_scratch_state = at_norm(
            input_gate_scratch_state, qrec.cache['s_2_s_q'].pre_normalization)
        input_gate_scratch_state = input_gate_scratch_state * \
            qrec.cache['s_2_s_q'].qbiases
        # biases are added before norm
        input_gate_scratch_state = at_norm(
            input_gate_scratch_state, qrec.cache['s_2_s_q'].qnorms)
        DiagCollector.record(
            'h_state_only_postscale', input_gate_scratch_state, scale=scales['act_input_scale'], node=params)
        # NOTE(review): assumes idx < n_input_cells so input_gate_scratch exists - confirm
        input_gate_scratch = input_gate_scratch+input_gate_scratch_state
        DiagCollector.record(
            'h_state_preact', input_gate_scratch, scale=scales['act_input_scale'], node=params)
        # apply activation at state scale
        input_gate_scratch = get_activation(params.activation, False)(
            input_gate_scratch, args['i_state'][1])
        DiagCollector.record(
            'h_state_prescale', input_gate_scratch, scale=scales['int_scale'], node=params)
        # scale the state scale to the output scale
        o_q = qrec.out_qs[0]
        # scale state_scale to output_scale - clip signed
        output_gate_scratch = np.clip(qrec.cache['s_2_o_q'].apply_scales(
            input_gate_scratch, 0), -32768, 32767)
        # move to unsigned
        output_gate_scratch = output_gate_scratch.astype(
            np.uint16) + o_q.zero_point.astype(np.uint16)
        DiagCollector.record(
            'h_state_out', output_gate_scratch, scale=scales['out_scale'], zero_point=o_q.zero_point.astype(np.uint16), node=params)
        # store the state
        args['i_state'][0] = output_gate_scratch.copy()
        return output_gate_scratch

    @classmethod
    def step_kernel8_8(cls, params: RNNParameters,
                       args: Mapping[str, np.ndarray],
                       idx: int,
                       input_tensor: np.ndarray,
                       qrec):
        """One cell step in the signed 8 bit input / 8 bit state scheme."""
        # These two sections could be combined by stacking the weights horizontally
        # and the input and state vertically
        scales = qrec.cache['scales']
        # For each cell: compute input_weight * input if there is an input
        if idx < params.n_input_cells:
            # scale result to recurrent_weight * input_state scale
            input_gate_scratch = scale_rnn_input(
                qrec,
                args['i_2_i_w'][0].astype(INT_DTYPE).dot(
                    input_tensor[idx].astype(INT_DTYPE)),
                0)
        # biases already in recurrent_weight * input_state scale
        input_gate_scratch_state = args['i_b'][0].copy()
        # For each cell: compute recurrent_weight * input_state
        input_gate_scratch_state += args['r_2_i_w'][0].astype(
            INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
        # scale to state scale
        # NOTE(review): assumes idx < n_input_cells so input_gate_scratch exists - confirm
        input_gate_scratch = scale_rnn_state(qrec,
                                             input_gate_scratch+input_gate_scratch_state,
                                             0)
        # apply activation at state scale
        input_gate_scratch = get_activation(params.activation, params.hard_act)(
            input_gate_scratch, args['i_state'][1])
        DiagCollector.record(
            'h_state_prescale', input_gate_scratch, scale=scales['int_scale'], node=params)
        # scale the state scale to the output scale
        output_gate_scratch = scale_rnn_output(qrec, input_gate_scratch, 0)
        DiagCollector.record(
            'h_state_out', output_gate_scratch, scale=scales['out_scale'], node=params)
        # store the state
        args['i_state'][0] = output_gate_scratch.copy()
        return output_gate_scratch

    @classmethod
    def step_kernel16_8(cls, params: RNNParameters,
                        args: Mapping[str, np.ndarray],
                        idx: int,
                        input_tensor: np.ndarray,
                        qrec):
        """One cell step in the 16 bit state / 8 bit weight scheme."""
        # These two sections could be combined by stacking the weights horizontally
        # and the input and state vertically
        scales = qrec.cache['scales']
        # For each cell: compute input_weight * input if there is an input
        if idx < params.n_input_cells:
            # scale result to recurrent_weight * input_state scale
            input_gate_scratch = scale_rnn_input(
                qrec,
                args['i_2_i_w'][0].astype(INT_DTYPE).dot(
                    input_tensor[idx].astype(INT_DTYPE)),
                0)
        # biases already in recurrent_weight * input_state scale
        input_gate_scratch_state = args['i_b'][0].copy()
        # For each cell: compute recurrent_weight * input_state
        input_gate_scratch_state += args['r_2_i_w'][0].astype(
            INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
        # scale to state scale
        input_gate_scratch = input_gate_scratch + scale_rnn_state(qrec,
                                                                  input_gate_scratch_state,
                                                                  0)
        # apply activation at state scale
        input_gate_scratch = get_activation(params.activation, params.hard_act)(
            input_gate_scratch, args['i_state'][1])
        DiagCollector.record(
            'h_state_prescale', input_gate_scratch, scale=scales['int_scale'], node=params)
        # scale the state scale to the output scale
        # here the activation output is already at the output scale - just clip
        output_gate_scratch = qrec.out_qs[0].clip(input_gate_scratch)
        DiagCollector.record(
            'h_state_out', output_gate_scratch, scale=scales['out_scale'], node=params)
        # store the state
        args['i_state'][0] = output_gate_scratch.copy()
        return output_gate_scratch
def scale_to(qrec,
             var,
             tensor: np.ndarray,
             axis: int):
    """Apply the cached scaler named *var* to *tensor* along *axis*."""
    scaler = qrec.cache[var]
    return scaler.apply_scales(tensor, axis)
def internal_qtype(qrec):
    """Return the cached internal qtype, defaulting to signed Q7 in 8 bits."""
    cached = qrec.cache.get('i_qtype')
    if cached:
        return cached
    # default is only constructed when nothing (truthy) is cached
    return QType(bits=8, q=7, signed=True)
def scale_gru_z_input2_z_HtxW(qrec, tensor: np.ndarray, axis: int):
    """Rescale the z gate input product to the Ht x W scale."""
    return qrec.cache['i_2_z_WR_q'].apply_scales(tensor, axis)


def scale_gru_r_input2_r_HtxW(qrec, tensor: np.ndarray, axis: int):
    """Rescale the r gate input product to the Ht x W scale."""
    return qrec.cache['i_2_r_WR_q'].apply_scales(tensor, axis)


def scale_gru_h_input2_h_HtxW(qrec, tensor: np.ndarray, axis: int):
    """Rescale the h gate input product to the Ht x W scale."""
    return qrec.cache['i_2_h_WR_q'].apply_scales(tensor, axis)


def scale_gru_z_internal(qrec, tensor: np.ndarray, axis: int):
    """Rescale the z gate Ht x W product to the internal scale."""
    return qrec.cache['z_WR_2_int_q'].apply_scales(tensor, axis)


def scale_gru_r_internal(qrec, tensor: np.ndarray, axis: int):
    """Rescale the r gate Ht x W product to the internal scale."""
    return qrec.cache['r_WR_2_int_q'].apply_scales(tensor, axis)


def scale_gru_h_internal(qrec, tensor: np.ndarray, axis: int):
    """Rescale the h gate Ht x W product to the internal scale."""
    return qrec.cache['h_WR_2_int_q'].apply_scales(tensor, axis)
def clipshort(x):
    """Saturate to the signed 16 bit range and cast to int16."""
    return np.clip(x, -(1 << 15), (1 << 15) - 1).astype(np.int16)
def clipushort(x):
    """Saturate to the unsigned 16 bit range and cast to uint16."""
    return np.clip(x, 0, (1 << 16) - 1).astype(np.uint16)
@params_type(GRUParameters)
@qrec_type('scaled')
class GRUSymmetric(RnnSymmetricMixin, KernelBase):
@classmethod
def step_kernel(cls, params: GRUParameters,
args: Mapping[str, np.ndarray],
idx: int,
input_tensor: np.ndarray,
qrec):
if args['h_state'][1].dtype == np.uint8:
return cls.step_kernelu8_u8(params, args, idx, input_tensor, qrec)
if args['h_state'][1].dtype == np.uint16:
return cls.step_kernelu16_u8(params, args, idx, input_tensor, qrec)
if args['h_state'][1].dtype == np.int16:
return cls.step_kernel16_8(params, args, idx, input_tensor, qrec)
return cls.step_kernel8_8(params, args, idx, input_tensor, qrec)
    @classmethod
    def step_kernelu8_u8(cls, params: GRUParameters,
                         args: Mapping[str, np.ndarray],
                         idx: int,
                         input_tensor: np.ndarray,
                         qrec):
        """One GRU cell step in the NE16 uint8 input / uint8 state scheme.

        Computes the z/r/h gates with zero-point corrected integer dot
        products, applies the gate activations and blends the new state
        Ht = (1 - zt) (.) ht + zt (.) Ht-1 in Q15.  Updates
        ``args['h_state'][0]`` in place and returns the new
        (uint8, zero point 128) state.
        """
        gate_scratch = {}
        scales = qrec.cache['scales']
        # TODO - set zero points
        DiagCollector.record(
            'h_state', args['h_state'][0], scale=scales['state'][0], node=params, zero_point=128)
        DiagCollector.record(
            'input', input_tensor[idx], scale=scales['in'][0], node=params, zero_point=qrec.in_qs[0].zero_point)
        in_tensor = input_tensor[idx].astype(INT_DTYPE)
        state_tensor = args['h_state'][0].astype(INT_DTYPE)
        # for gate in ['z', 'h', 'r']:
        #     DiagCollector.record(f'{gate}_weigths', args[f'r_2_{gate}_w'][0],
        #                          scale=args[f'r_2_{gate}_w'][1].scale,
        #                          node=params,
        #                          zero_point=args[f'r_2_{gate}_w'][1].zero_point)
        # input contributions to the z and r gates
        if idx < params.n_input_cells:
            for gate in ['z', 'r']:
                # NE16 8 bit
                # zero-point correction: -sum(input * weight_zp)
                gate_scratch[gate] = np.sum(in_tensor * -args[f'w_2_{gate}_w'][1].zero_point.astype(INT_DTYPE))
                gate_scratch[gate] += args[f'w_2_{gate}_w'][0].astype(INT_DTYPE).dot(in_tensor)
                # add zero offset bias + norm rounding in i_2_gate_q
                # scales to r * r_w of gate
                DiagCollector.record(f'{gate}_gate_inp_before_scale', gate_scratch[gate],
                                     scale=scales['i'][gate], node=params)
                gate_scratch[gate] = gate_scratch[gate] * \
                    qrec.cache[f'w_2_{gate}_q'].qbiases
                gate_scratch[gate] = gate_scratch[gate] + \
                    args[f'{gate}_b'][1].attr.interleaved_values[0]
                gate_scratch[gate] = gate_scratch[gate] >> qrec.cache[f'w_2_{gate}_q'].qnorms
                DiagCollector.record(f'{gate}_gate_inp', gate_scratch[gate],
                                     scale=scales['r'][gate], node=params)
        # recurrent contributions; h is included here only for linear_before_reset
        for gate in ['z', 'h', 'r'] if params.linear_before_reset else ['z', 'r']:
            # NE16 8 bit with streamin
            # calculate gate on recurrent
            # TODO - recurrent gate is not being properly calculated
            if gate in gate_scratch:
                gate_scratch[gate] += np.sum(
                    state_tensor * -args[f'r_2_{gate}_w'][1].zero_point.astype(INT_DTYPE))
            else:
                gate_scratch[gate] = np.sum(
                    state_tensor * -args[f'r_2_{gate}_w'][1].zero_point.astype(INT_DTYPE))
            gate_scratch[gate] += args[f'r_2_{gate}_w'][0].astype(INT_DTYPE).dot(state_tensor)
            # scales to Q12
            # the h gate recurrent biases live under 'r_h_b', hence the prefix
            prefix = 'r_' if gate == 'h' else ''
            if gate in ['h']:
                DiagCollector.record('h_gate_state_before_scale', gate_scratch[gate] + args[f'{prefix}{gate}_b'][0]/qrec.cache[f'r_2_{gate}_q'].qbiases,
                                     scale=scales['r'][gate], node=params)
            gate_scratch[gate] = gate_scratch[gate] * qrec.cache[f'r_2_{gate}_q'].qbiases
            gate_scratch[gate] = gate_scratch[gate] + args[f'{prefix}{gate}_b'][0]
            gate_scratch[gate] = gate_scratch[gate] >> qrec.cache[f'r_2_{gate}_q'].qnorms
            if gate in ['h']:
                DiagCollector.record('h_gate_state', gate_scratch[gate],
                                     scale=scales['act_in'], node=params)
            elif gate in ['z', 'r']:
                DiagCollector.record(f'{gate}_gate', gate_scratch[gate],
                                     scale=scales['act_in'], node=params)
                # pipelined on other cores
                gate_scratch[gate] = get_activation(params.activation_zr, params.hard_act)(
                    gate_scratch[gate], internal_qtype(qrec))
                DiagCollector.record(f'{gate}_gate_sigmoid', gate_scratch[gate],
                                     scale=scales['act_out'], node=params)
        if params.linear_before_reset:
            # haddamard on state after linear
            # ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0
            # Q15 * Q3.12 >> 15 -> Q12
            # r is guaranteed to be in Q15 with no overflow
            # h (contains recurrent only) needs to be saturated to a Q3.12
            gate_scratch['h'] = clipshort(gate_scratch['h']) * gate_scratch['r']
            DiagCollector.record(
                'hr_haddamard', gate_scratch['h'],
                scale=scales['act_in'] * scales['act_out'],
                node=params)
            gate_scratch['h'] = at_norm(gate_scratch['h'], scales['act_out_q'])
            DiagCollector.record(
                'hr_haddamard_an', gate_scratch['h'],
                scale=scales['act_in'],
                node=params)
        else:
            # haddamard on state before linear
            # r_gate_scratch = (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh
            # r is in Q15 signed. state is in Q7
            # Clip and norm to 8 bit unsigned ready for NE16 input
            # Could look at doing this in 16 bit on NE16 if accuracy is poor but then scaling will need to be
            # manual, bias streamed in, etc.
            # Needs r ready so do that first
            # (state + 128).astype(int8) recentres the uint8 state to signed via wraparound
            gate_scratch['hs'] = np.clip(at_norm((state_tensor + 128).astype(np.int8) * gate_scratch['r'], 15) + 128, 0, 255).astype(np.uint8)
            DiagCollector.record(
                'hr_haddamard', gate_scratch['hs'],
                scale=math.pow(2, -7),
                zero_point=128,
                node=params)
            # NOTE(review): zero point taken from r_2_r_w while the weights used
            # below are r_2_h_w - looks like a copy/paste slip; confirm intent
            gate_scratch['h'] = np.sum(
                gate_scratch['hs'] * -args['r_2_r_w'][1].zero_point.astype(INT_DTYPE))
            gate_scratch['h'] += args['r_2_h_w'][0].astype(INT_DTYPE).dot(gate_scratch['hs'])
            # scales to Q12
            gate_scratch['h'] = gate_scratch['h'] * qrec.cache['r_2_h_q'].qbiases
            gate_scratch['h'] = gate_scratch['h'] + args['r_h_b'][0]
            gate_scratch['h'] = gate_scratch['h'] >> qrec.cache['r_2_h_q'].qnorms
            DiagCollector.record(
                'h_gate_state', gate_scratch['h'],
                scale=scales['act_in'],
                node=params)
        # input contribution to the h gate (common to both branches above)
        if idx < params.n_input_cells:
            # NE16 8 bit
            gate_scratch['hi'] = np.sum(in_tensor * -args['w_2_h_w'][1].zero_point.astype(INT_DTYPE))
            gate_scratch['hi'] += args['w_2_h_w'][0].astype(INT_DTYPE).dot(in_tensor)
            # scale to Q12
            gate_scratch['hi'] = gate_scratch['hi'] * \
                qrec.cache['w_2_h_q'].qbiases
            gate_scratch['hi'] = gate_scratch['hi'] + args['w_h_b'][0]
            gate_scratch['hi'] = gate_scratch['hi'] >> qrec.cache['w_2_h_q'].qnorms
            DiagCollector.record('h_gate_inp', gate_scratch['hi'],
                                 scale=scales['act_in'], node=params)
            gate_scratch['h'] += gate_scratch['hi']
        else:
            # Is this correct if there is no input (and below)? This is not a mode that
            # exists in any framework and will not ever be used at present
            gate_scratch['h'] += scale_to(qrec, 'w_2_h_q', args['w_h_b'][0], 0)
        DiagCollector.record(
            'h_gate', gate_scratch['h'],
            scale=scales['act_in'],
            node=params)
        # scale to q15 or internal Q depending on activation type
        gate_scratch['h'] = get_activation(params.activation, params.hard_act)(
            gate_scratch['h'], internal_qtype(qrec))
        DiagCollector.record('hr_gate_tanh', gate_scratch['h'],
                             scale=scales['act_out'], node=params)
        # ----------- SCALE Q7 -----------
        # Ht = (1 - zt) (.) ht + zt (.) Ht-1
        # all parameters in Q15. Result in Q30
        # >> and clip
        # state must be in Q15 from Q7 unsigned symmetric zeropoint
        # TODO - Is this shift correct? Q7 -> Q15
        h_state = (state_tensor.astype(INT_DTYPE) - args['h_state'][1].zero_point) << (scales['act_out_q'] - 7)
        DiagCollector.record('h_pre_ending', h_state,
                             scale=scales['act_out'],
                             node=params)
        h_state = (((0x8000 - gate_scratch['z']) * gate_scratch['h']) +
                   (gate_scratch['z'] * h_state))
        DiagCollector.record('h_state_out_prenorm', h_state,
                             scale=math.pow(2, -30),
                             node=params)
        h_state = qrec.out_qs[0].clip(at_norm(h_state, 30-7) + qrec.out_qs[0].zero_point)
        DiagCollector.record('h_state_out', h_state,
                             scale=math.pow(2, -7),
                             zero_point=128,
                             node=params)
        # store the state
        args['h_state'][0] = h_state.copy()
        return h_state
@classmethod
def step_kernelu16_u8(cls, params: GRUParameters,
args: Mapping[str, np.ndarray],
idx: int,
input_tensor: np.ndarray,
qrec):
input_scratch = {}
state_scratch = {}
scales = qrec.cache['scales']
# TODO - set zero points
DiagCollector.record(
'h_state', args['h_state'][0], scale=scales['state'][0], node=params, zero_point=0x8000)
DiagCollector.record(
'input', input_tensor[idx], scale=scales['in'][0], node=params, zero_point=qrec.in_qs[0].zero_point)
in_tensor = input_tensor[idx].astype(INT_DTYPE)
state_tensor = args['h_state'][0].astype(INT_DTYPE)
state_tensor_signed = (args['h_state'][0] + 0x8000).astype(np.int16).astype(np.int32)
# for gate in ['z', 'h', 'r']:
# DiagCollector.record(f'{gate}_weigths', args[f'r_2_{gate}_w'][0],
# scale=args[f'r_2_{gate}_w'][1].scale,
# node=params,
# zero_point=args[f'r_2_{gate}_w'][1].zero_point)
if idx < params.n_input_cells:
for gate in ['z', 'r']:
# NE16 8 bit
input_scratch[gate] = np.sum(in_tensor * -args[f'w_2_{gate}_w'][1].zero_point.astype(INT_DTYPE))
input_scratch[gate] += args[f'{gate}_b'][1].attr.interleaved_values[0]
input_scratch[gate] += args[f'w_2_{gate}_w'][0].astype(INT_DTYPE).dot(in_tensor)
# add zero offset bias + norm rounding in i_2_gate_q
# scales to r * r_w of gate
DiagCollector.record(f'{gate}_gate_inp_before_scale', input_scratch[gate],
scale=scales['i'][gate], node=params)
input_scratch[gate] = qrec.cache[f'w_2_{gate}_q'].apply_scales(input_scratch[gate], 0)
DiagCollector.record(f'{gate}_gate_inp_after_scale', input_scratch[gate],
scale=scales['i'][gate], node=params)
for gate in ['z', 'h', 'r'] if params.linear_before_reset else ['z', 'r']:
prefix = 'r_' if gate == 'h' else ''
state_scratch[gate] = np.sum(
state_tensor * -args[f'r_2_{gate}_w'][1].zero_point.astype(INT_DTYPE))
state_scratch[gate] += args[f'{prefix}{gate}_b'][0]
state_scratch[gate] += args[f'r_2_{gate}_w'][0].astype(INT_DTYPE).dot(state_tensor)
DiagCollector.record(f'{gate}_gate_state_before_scale', state_scratch[gate],
scale=scales['r'][gate], node=params)
state_scratch[gate] = qrec.cache[f'r_2_{gate}_q'].apply_scales(state_scratch[gate], 0)
if gate == 'h':
DiagCollector.record('h_gate_state', state_scratch[gate],
scale=scales['act_in'], node=params)
else:
DiagCollector.record(f'{gate}_gate_state_after_scale', state_scratch[gate],
scale=scales['act_in'], node=params)
if gate in ['z', 'r']:
state_scratch[gate] += input_scratch[gate]
DiagCollector.record(f'{gate}_gate', state_scratch[gate],
scale=scales['act_in'], node=params)
# pipelined on other cores
state_scratch[gate] = get_activation(params.activation_zr, params.hard_act)(
state_scratch[gate], internal_qtype(qrec))
DiagCollector.record(f'{gate}_gate_sigmoid', state_scratch[gate],
scale=scales['act_out'], node=params)
if params.linear_before_reset:
# haddamard on state after linear
# ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0
# Q15 * Q3.12 >> 15 -> Q12
# r is guaranteed to be in Q15 with no overflow
# h (contains recurrent only) needs to be saturated to a Q3.12
state_scratch['h'] = clipshort(state_scratch['h']) * state_scratch['r']
DiagCollector.record(
'hr_haddamard', state_scratch['h'],
scale=scales['act_in'] * scales['act_out'],
node=params)
state_scratch['h'] = at_norm(state_scratch['h'], scales['act_out_q'])
DiagCollector.record(
'hr_haddamard_an', state_scratch['h'],
scale=scales['act_in'],
node=params)
else:
# haddamard on state before linear
# r_gate_scratch = (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh
# r is in Q15 signed. state is in Q7
# Clip and norm to 8 bit unsigned ready for NE16 input
# Could look at doing this in 16 bit on NE16 if accuracy is poor but then scaling will need to be
# manual, bias streamed in, etc.
# Needs r ready so do that first
state_scratch['hs'] = np.clip(at_norm(state_tensor_signed * state_scratch['r'], 15) + 0x8000, 0, 0xffff).astype(np.uint16)
DiagCollector.record(
'hr_haddamard', state_scratch['hs'],
scale=math.pow(2, -7),
zero_point=0x8000,
node=params)
state_scratch['h'] = np.sum(
state_scratch['hs'] * -args['r_2_r_w'][1].zero_point.astype(INT_DTYPE))
state_scratch['h'] += args['r_h_b'][0]
state_scratch['h'] += args['r_2_h_w'][0].astype(INT_DTYPE).dot(state_scratch['hs'])
state_scratch[gate] = qrec.cache['r_2_h_q'].apply_scales(state_scratch[gate], 0)
DiagCollector.record(
'h_gate_state', state_scratch['h'],
scale=scales['act_in'],
node=params)
if idx < params.n_input_cells:
input_scratch['h'] = np.sum(in_tensor * -args['w_2_h_w'][1].zero_point.astype(INT_DTYPE))
input_scratch['h'] += args['w_h_b'][0]
input_scratch['h'] += args['w_2_h_w'][0].astype(INT_DTYPE).dot(in_tensor)
DiagCollector.record(f'h_gate_inp_before_scale', input_scratch['h'],
scale=scales['i']['h'], node=params)
# scale to Q12
input_scratch['h'] = qrec.cache['w_2_h_q'].apply_scales(input_scratch['h'], 0)
DiagCollector.record(
'h_gate_inp', input_scratch['h'],
scale=scales['act_in'],
node=params)
state_scratch['h'] += input_scratch['h']
else:
# Is this correct if there is no input (and below)? This is not a mode that
# exists in any framework and will not ever be used at present
state_scratch['h'] += scale_to(qrec, 'w_2_h_q', args['w_h_b'][0], 0)
DiagCollector.record(
'h_gate', state_scratch['h'],
scale=scales['act_in'],
node=params)
state_scratch['h'] = get_activation(params.activation, params.hard_act)(
state_scratch['h'], internal_qtype(qrec))
DiagCollector.record('hr_gate_tanh', state_scratch['h'],
scale=scales['act_out'], node=params)
# ----------- SCALE Q7 -----------
# Ht = (1 - zt) (.) ht + zt (.) Ht-1
# all parameters in Q15. Result in Q30
# >> and clip
# state already in Q15
h_state = state_tensor_signed.copy()
DiagCollector.record('h_pre_ending', h_state,
scale=scales['act_out'],
node=params)
h_state = (((0x8000 - state_scratch['z']) * state_scratch['h']) +
(state_scratch['z'] * h_state))
DiagCollector.record('h_state_out_prenorm', h_state,
scale=math.pow(2, -30),
node=params)
h_state = qrec.out_qs[0].clip(at_norm(h_state, 30-15) + qrec.out_qs[0].zero_point)
DiagCollector.record('h_state_out', h_state,
scale=math.pow(2, -15),
zero_point=0x8000,
node=params)
args['h_state'][0] = h_state.copy()
return h_state
    @classmethod
    def step_kernel8_8(cls, params: GRUParameters,
                       args: Mapping[str, np.ndarray],
                       idx: int,
                       input_tensor: np.ndarray,
                       qrec):
        """One GRU cell step in the signed 8 bit input / 8 bit state scheme.

        Computes the z and r gates, the candidate h gate (respecting
        ``linear_before_reset``) and blends Ht = (1 - zt) (.) ht + zt (.) Ht-1
        at the internal Q.  Updates ``args['h_state'][0]`` in place and
        returns the new state.
        """
        # These two sections could be combined by stacking the weights horizontally
        # and the input and state vertically
        z_gate_scratch = 0
        hr_gate_scratch = 0
        scales = qrec.cache['scales']
        DiagCollector.record(
            'h_state', args['h_state'][0], scale=scales['state'], node=params)
        DiagCollector.record(
            'input', input_tensor[idx], scale=scales['in'][0], node=params)
        in_tensor = input_tensor[idx].astype(INT_DTYPE)
        state_tensor = args['h_state'][0].astype(INT_DTYPE)
        # NOTE(review): records w_2_z_w data under the r_2_z_w scale - confirm intent
        DiagCollector.record('z_weigths', args['w_2_z_w'][0],
                             scale=scales['r_2_z_w'], node=params)
        # input contributions to the z and r gates
        if idx < params.n_input_cells:
            # calculate z gate on input
            z_gate_scratch += args['w_2_z_w'][0].astype(
                INT_DTYPE).dot(in_tensor)
            # calculate r gate on input
            hr_gate_scratch += args['w_2_r_w'][0].astype(
                INT_DTYPE).dot(in_tensor)
            # scale to recurrent * state scale if input scale is different
            DiagCollector.record('z_gate_inp', z_gate_scratch,
                                 scale=scales['w_2_z_w'] * scales['state'], node=params)
            DiagCollector.record('r_gate_inp', hr_gate_scratch,
                                 scale=scales['w_2_r_w'] * scales['state'], node=params)
            if not params.rnn_same_inout_scale:
                z_gate_scratch = scale_gru_z_input2_z_HtxW(qrec,
                                                           z_gate_scratch, 0)
                hr_gate_scratch = scale_gru_r_input2_r_HtxW(qrec,
                                                            hr_gate_scratch, 0)
        # calculate z gate on recurrent
        z_gate_scratch += args['r_2_z_w'][0].astype(
            INT_DTYPE).dot(state_tensor) + args['z_b'][0].copy()
        DiagCollector.record('z_gate', z_gate_scratch,
                             scale=scales['r_2_z_w'] * scales['state'], node=params)
        # if not hard_act then the scale will scale to Q15
        z_gate_scratch = get_activation(params.activation_zr, params.hard_act)(
            scale_gru_z_internal(qrec, z_gate_scratch, 0), internal_qtype(qrec))
        # normalise to internal Q
        if not params.hard_act and internal_qtype(qrec).q != 15:
            z_gate_scratch = at_norm(
                z_gate_scratch, 15 - internal_qtype(qrec).q)
        DiagCollector.record('z_gate_sigmoid', z_gate_scratch,
                             scale=internal_qtype(qrec).scale, node=params)
        # same as above on r gate
        hr_gate_scratch += args['r_2_r_w'][0].astype(
            INT_DTYPE).dot(state_tensor) + args['r_b'][0].copy()
        DiagCollector.record('r_gate', hr_gate_scratch,
                             scale=scales['r_2_r_w'] * scales['state'], node=params)
        hr_gate_scratch = get_activation(params.activation_zr, params.hard_act)(
            scale_gru_r_internal(qrec, hr_gate_scratch, 0), internal_qtype(qrec))
        if not params.hard_act and internal_qtype(qrec).q != 15:
            hr_gate_scratch = at_norm(
                hr_gate_scratch, 15 - internal_qtype(qrec).q)
        DiagCollector.record('r_gate_sigmoid', hr_gate_scratch,
                             scale=internal_qtype(qrec).scale, node=params)
        if params.linear_before_reset:
            # haddamard after linear
            # r_gate_scratch = (rt (.) (Ht-1*(Rh^T) + Rbh))
            h_gate_recurrent = args['r_2_h_w'][0].astype(
                INT_DTYPE).dot(state_tensor) + args['r_h_b'][0]
            DiagCollector.record(
                'h_gate_state', h_gate_recurrent,
                scale=math.pow(2, -7) * scales['r_2_h_w'],
                node=params)
            # this is int_q_scale * state_q_scale * h_recurrent_weights_scale
            hr_gate_scratch = hr_gate_scratch * h_gate_recurrent
            DiagCollector.record(
                'hr_haddamard', hr_gate_scratch,
                scale=math.pow(2, -7) * math.pow(2, -
                                                 internal_qtype(qrec).q) * scales['r_2_h_w'],
                node=params)
            # normalize to state_q_scale * h_recurrent_weights_scale
            hr_gate_scratch = at_norm(hr_gate_scratch, internal_qtype(qrec).q)
            # ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0
            if idx < params.n_input_cells:
                if not params.rnn_same_inout_scale:
                    # scale input_scale * h_input_weights_scale to state_q_scale * h_recurrent_weights_scale
                    hr_gate_input = scale_gru_h_input2_h_HtxW(qrec,
                                                              (args['w_2_h_w'][0].astype(INT_DTYPE).dot(
                                                                  in_tensor) + args['w_h_b'][0]),
                                                              0)
                else:
                    # since input_scale == state scale and h_input_weights_scale == h_recurrent_weights_scale
                    # no scaling is necessary
                    hr_gate_input = args['w_2_h_w'][0].astype(
                        INT_DTYPE).dot(in_tensor) + args['w_h_b'][0]
            else:
                # Is this correct if there is no input (and below)? This is not a mode that
                # exists in any framework and will not ever be used at present
                if not params.rnn_same_inout_scale:
                    hr_gate_input = qrec.scale_h_input2_h_HtxW(
                        args['w_h_b'][0], 0)
                else:
                    hr_gate_input = args['w_h_b'][0]
        else:
            # haddamard on state before linear
            # r_gate_scratch = (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh
            # this is int_q_scale * state_q_scale * h_recurrent_weights_scale
            # normalize to state_q_scale * h_recurrent_weights_scale
            hr_gate_scratch = state_tensor * hr_gate_scratch
            DiagCollector.record(
                'hr_haddamard', hr_gate_scratch,
                scale=math.pow(2, -7) * math.pow(2, -internal_qtype(qrec).q),
                node=params)
            hr_gate_scratch = at_norm(args['r_2_h_w'][0].astype(INT_DTYPE).dot(
                hr_gate_scratch), internal_qtype(qrec).q) + args['r_h_b'][0]
            DiagCollector.record(
                'h_gate_state', hr_gate_scratch,
                scale=math.pow(2, -7) * scales['r_2_h_w'],
                node=params)
            if idx < params.n_input_cells:
                if not params.rnn_same_inout_scale:
                    # scale input_scale * h_input_weights_scale to state_q_scale * h_recurrent_weights_scale
                    hr_gate_input = scale_gru_h_input2_h_HtxW(
                        qrec,
                        args['w_2_h_w'][0].dot(in_tensor) + args['w_h_b'][0],
                        0)
                else:
                    hr_gate_input = args['w_2_h_w'][0].astype(
                        INT_DTYPE).dot(in_tensor) + args['w_h_b'][0]
            else:
                if not params.rnn_same_inout_scale:
                    hr_gate_input = qrec.scale_h_input2_h_HtxW(
                        args['w_h_b'][0], 0)
                else:
                    hr_gate_input = args['w_h_b'][0]
        DiagCollector.record(
            'h_gate_input', hr_gate_input,
            scale=math.pow(2, -7) * scales['r_2_h_w'],
            node=params)
        hr_gate_scratch += hr_gate_input
        DiagCollector.record(
            'h_gate', hr_gate_scratch,
            scale=math.pow(2, -7) * scales['r_2_h_w'],
            node=params)
        # scale to q15 or internal Q depending on activation type
        hr_gate_scratch = get_activation(params.activation, params.hard_act)(
            scale_gru_h_internal(qrec, hr_gate_scratch, 0), internal_qtype(qrec))
        # if not hard then go from Q15 -> int_q
        if not params.hard_act and internal_qtype(qrec).q != 15:
            hr_gate_scratch = at_norm(
                hr_gate_scratch, 15 - internal_qtype(qrec).q)
        DiagCollector.record('hr_gate_tanh', hr_gate_scratch,
                             scale=math.pow(2, -internal_qtype(qrec).q), node=params)
        # ----------- SCALE Q7 -----------
        # Ht = (1 - zt) (.) ht + zt (.) Ht-1
        # zt = (1 - int_q) * Q7 + Q7 * Q7 = INT_Q * 2
        # >> and clip
        h_state = state_tensor.copy() << (internal_qtype(qrec).q - 7)
        h_state = (((internal_qtype(qrec).quantize(1) - z_gate_scratch) * hr_gate_scratch) +
                   (z_gate_scratch * h_state))
        DiagCollector.record('h_state_out_prenorm', h_state,
                             scale=math.pow(2, -(internal_qtype(qrec).q * 2)),
                             node=params)
        h_state = qrec.out_qs[0].clip(
            at_norm(
                h_state,
                (internal_qtype(qrec).q * 2) - 7)).astype(qrec.out_qs[0].dtype)
        DiagCollector.record('h_state_out', h_state,
                             scale=math.pow(2, -7),
                             node=params)
        # store the state
        args['h_state'][0] = h_state.copy()
        return h_state
    @classmethod
    def step_kernel16_8(cls, params: GRUParameters,
                        args: Mapping[str, np.ndarray],
                        idx: int,
                        input_tensor: np.ndarray,
                        qrec):
        """Execute one GRU timestep for the 16-bit state / 8-bit weight kernel.

        Computes the z (update) and r (reset) gates from the input and the
        recurrent state, forms the candidate state h (with or without the
        linear-before-reset ordering), and blends old and new state with
        Ht = (1 - zt) (.) ht + zt (.) Ht-1.  Updates args['h_state'][0] in
        place and returns the new state tensor.

        params: GRU node parameters (activations, linear_before_reset, etc.)
        args: per-tensor [value, qtype] pairs keyed by canonical input name
        idx: current cell index; the candidate-state paths assert
             idx < params.n_input_cells, so every step must see an input
        input_tensor: sequence of quantized input vectors, indexed by idx
        qrec: quantization record holding cached scales and scaling helpers
        """
        # Accumulators start as python ints; first += promotes them to arrays.
        z_gate_scratch = 0
        hr_gate_scratch = 0
        scales = qrec.cache['scales']
        DiagCollector.record(
            'h_state', args['h_state'][0], scale=scales['state'], node=params)
        DiagCollector.record(
            'input', input_tensor[idx], scale=scales['in'][0], node=params)
        in_tensor = input_tensor[idx].astype(INT_DTYPE)
        state_tensor = args['h_state'][0]
        DiagCollector.record('z_weigths', args['w_2_z_w'][0],
                             scale=scales['r_2_z_w'], node=params)
        if idx < params.n_input_cells:
            # calculate z gate on input
            z_gate_scratch += args['w_2_z_w'][0].astype(
                INT_DTYPE).dot(in_tensor)
            # calculate r gate on input
            hr_gate_scratch += args['w_2_r_w'][0].astype(
                INT_DTYPE).dot(in_tensor)
            # scale to recurrent * state scale if input scale is different
            DiagCollector.record('z_gate_inp', z_gate_scratch,
                                 scale=scales['w_2_z_w'] * scales['in'][0], node=params)
            DiagCollector.record('r_gate_inp', hr_gate_scratch,
                                 scale=scales['w_2_r_w'] * scales['in'][0], node=params)
            z_gate_scratch = scale_to(qrec,
                                      "input_z_w_internal",
                                      z_gate_scratch, 0)
            hr_gate_scratch = scale_to(qrec,
                                       "input_r_w_internal",
                                       hr_gate_scratch, 0)
        # calculate z gate on recurrent
        z_gate_state_scratch = args['r_2_z_w'][0].astype(
            INT_DTYPE).dot(state_tensor)
        z_gate_state_scratch = scale_to(qrec,
                                        "state_z_w_internal",
                                        z_gate_state_scratch, 0)
        # bias in Q12 input already in Q12
        z_gate_scratch += args['z_b'][0].copy() + z_gate_state_scratch
        DiagCollector.record(
            'z_gate', z_gate_scratch, scale=internal_qtype(qrec).scale, node=params)
        # will output Q15
        z_gate_scratch = get_activation(
            params.activation_zr, False)(z_gate_scratch, internal_qtype(qrec))
        # leave z in Q15
        DiagCollector.record('z_gate_sigmoid', z_gate_scratch,
                             scale=scales['act'], node=params)
        # same as above on r gate
        hr_gate_state_scratch = args['r_2_r_w'][0].astype(
            INT_DTYPE).dot(state_tensor)
        hr_gate_state_scratch = scale_to(
            qrec, "state_r_w_internal", hr_gate_state_scratch, 0)
        # bias in Q12 input already in Q12
        hr_gate_scratch += hr_gate_state_scratch + args['r_b'][0].copy()
        DiagCollector.record('r_gate', hr_gate_scratch,
                             scale=internal_qtype(qrec).scale, node=params)
        hr_gate_scratch = get_activation(params.activation_zr, False)(
            hr_gate_scratch, internal_qtype(qrec))
        DiagCollector.record('r_gate_sigmoid', hr_gate_scratch,
                             scale=scales['act'], node=params)
        if params.linear_before_reset:
            # haddamard after linear
            # r_gate_scratch = (rt (.) (Ht-1*(Rh^T) + Rbh))
            # h bias is in state_scale * h_w scale NOT Q12
            h_gate_recurrent = args['r_2_h_w'][0].astype(
                INT_DTYPE).dot(state_tensor) + args['r_h_b'][0]
            h_gate_recurrent = scale_to(
                qrec, "state_h_w_internal", h_gate_recurrent, 0)
            # now in Q12
            hr_gate_scratch = hr_gate_scratch * h_gate_recurrent
            DiagCollector.record(
                'hr_haddamard', hr_gate_scratch,
                scale=scales['act'] * math.pow(2, -internal_qtype(qrec).q),
                node=params)
            # now in Q12 + Q15
            # normalize to Q12
            hr_gate_scratch = at_norm(hr_gate_scratch, 15)
            # ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0
            # kernel requires an input contribution at every step
            assert idx < params.n_input_cells
            # scale input_scale * h_input_weights_scale to Q12
            # h bias is in input_scale * h_w scale NOT Q12
            hr_gate_scratch += scale_to(
                qrec,
                "input_h_w_internal",
                args['w_2_h_w'][0].astype(INT_DTYPE).dot(
                    in_tensor) + args['w_h_b'][0],
                0)
        else:
            # haddamard on state before linear
            # r_gate_scratch = (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh
            # Q15 * stateQ -> stateQ
            h_gate_recurrent = at_norm(
                state_tensor * hr_gate_scratch, 15)
            DiagCollector.record(
                'hr_haddamard', h_gate_recurrent,
                scale=scales['state'],
                node=params)
            h_gate_recurrent = args['r_2_h_w'][0].astype(
                INT_DTYPE).dot(h_gate_recurrent) + args['r_h_b'][0]
            hr_gate_scratch = scale_to(
                qrec, "state_h_w_internal", h_gate_recurrent, 0)
            # kernel requires an input contribution at every step
            assert idx < params.n_input_cells
            # scale input_scale * h_input_weights_scale to internal
            hr_gate_scratch += scale_to(
                qrec,
                "input_h_w_internal",
                args['w_2_h_w'][0].astype(INT_DTYPE).dot(
                    in_tensor) + args['w_h_b'][0],
                0)
        # outputs q15
        hr_gate_scratch = get_activation(params.activation, False)(
            hr_gate_scratch, internal_qtype(qrec))
        DiagCollector.record('hr_gate_tanh', hr_gate_scratch,
                             scale=scales['act'], node=params)
        # Ht = (1 - zt) (.) ht + zt (.) Ht-1
        # zt = (1 - Q15) * Q15 + Q15 * Q15 = Q30
        # >> 15 and clip
        # h state is in Q15 * 1 or Q14
        h_state = state_tensor.copy()
        state_q = args['h_state'][1].q
        if state_q == 14:
            # align Q14 state to Q15 before the blend
            h_state <<= 1
        h_state = (
            ((qrec.cache['act_qtype'].quantize(1) - z_gate_scratch) * hr_gate_scratch) +
            (z_gate_scratch * h_state)
        )
        DiagCollector.record('h_state_out_prenorm', h_state,
                             scale=math.pow(2, -30), node=params)
        # extra >> 1 returns a Q14 state to its own Q
        if state_q == 14:
            h_state = at_norm(h_state, 16)
        else:
            h_state = at_norm(h_state, 15)
        h_state = qrec.out_qs[0].clip(h_state)
        DiagCollector.record('h_state_out', h_state,
                             scale=scales['state'], node=params)
        args['h_state'][0] = h_state.copy()
        return h_state
def scale_lstm_input_input(qrec, tensor: np.ndarray, axis: int):
    """Rescale the input->input-gate product with the cached 'i_2_i_q' scaler."""
    qname = 'i_2_i_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_input_forget(qrec, tensor: np.ndarray, axis: int):
    """Rescale the input->forget-gate product with the cached 'i_2_f_q' scaler."""
    qname = 'i_2_f_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_input_cell(qrec, tensor: np.ndarray, axis: int):
    """Rescale the input->cell-gate product with the cached 'i_2_c_q' scaler."""
    qname = 'i_2_c_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_input_output(qrec, tensor: np.ndarray, axis: int):
    """Rescale the input->output-gate product with the cached 'i_2_o_q' scaler."""
    qname = 'i_2_o_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_sum_input(qrec, tensor: np.ndarray, axis: int):
    """Rescale the input-gate sum with the cached 'gate_sum_i' scaler."""
    qname = 'gate_sum_i'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_sum_forget(qrec, tensor: np.ndarray, axis: int):
    """Rescale the forget-gate sum with the cached 'gate_sum_f' scaler."""
    qname = 'gate_sum_f'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_sum_cell(qrec, tensor: np.ndarray, axis: int):
    """Rescale the cell-gate sum with the cached 'gate_sum_c' scaler."""
    qname = 'gate_sum_c'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_sum_output(qrec, tensor: np.ndarray, axis: int):
    """Rescale the output-gate sum with the cached 'gate_sum_o' scaler."""
    qname = 'gate_sum_o'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_istate_input(qrec, tensor: np.ndarray, axis: int):
    """Rescale the recurrent->input-gate product with the cached 'r_2_i_q' scaler."""
    qname = 'r_2_i_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_istate_forget(qrec, tensor: np.ndarray, axis: int):
    """Rescale the recurrent->forget-gate product with the cached 'r_2_f_q' scaler."""
    qname = 'r_2_f_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_istate_cell(qrec, tensor: np.ndarray, axis: int):
    """Rescale the recurrent->cell-gate product with the cached 'r_2_c_q' scaler."""
    qname = 'r_2_c_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_istate_output(qrec, tensor: np.ndarray, axis: int):
    """Rescale the recurrent->output-gate product with the cached 'r_2_o_q' scaler."""
    qname = 'r_2_o_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_cellin(qrec, tensor: np.ndarray, axis: int):
    """Rescale the cell-state input product with the cached 'cell_in_q' scaler."""
    qname = 'cell_in_q'
    return scale_to(qrec, qname, tensor, axis)
def scale_lstm_cellout(qrec, tensor: np.ndarray, axis: int):
    """Rescale with 'cell_out_q' then clip into the external c_state qtype."""
    c_state_qtype = qrec.in_qs[LSTMParameters.INPUT_NAMES.index('c_state')]
    rescaled = scale_to(qrec, 'cell_out_q', tensor, axis)
    return c_state_qtype.clip(rescaled)
def scale_lstm_output(qrec, tensor: np.ndarray, axis: int):
    """Rescale with 'state_out_q' then clip into the first output qtype."""
    rescaled = scale_to(qrec, 'state_out_q', tensor, axis)
    return qrec.out_qs[0].clip(rescaled)
def check_unsupported(args):
    """Raise NotImplementedError for LSTM variants these kernels cannot run.

    Checks, in order: CIFG (input-gate weights present but None), peephole
    (cell->output weights present), layer norm (forget-norm present) and
    projection (weights or bias present).
    """
    if 'i_2_i_w' in args and args['i_2_i_w'][0] is None:
        raise NotImplementedError("cifg mode is not supported")
    if 'c_2_o_w' in args and args['c_2_o_w'][0] is not None:
        raise NotImplementedError("peephole mode is not supported")
    if 'f_norm' in args and args['f_norm'][0] is not None:
        raise NotImplementedError("layer norm mode is not supported")
    has_projection = any(
        name in args and args[name][0] is not None
        for name in ('proj_w', 'proj_b'))
    if has_projection:
        raise NotImplementedError("LSTMP is not yet supported by kernel")
@ params_type(LSTMParameters)
@ qrec_type('scaled')
class LSTMSymmetric(RnnSymmetricMixin, KernelBase):
@ classmethod
def step_kernel(cls, params: LSTMParameters,
args: Mapping[str, np.ndarray],
idx: int,
input_tensor: np.ndarray,
qrec):
if args['i_state'][1].dtype == np.uint8:
return cls.step_kernelu8_u8(params, args, idx, input_tensor, qrec)
if args['i_state'][1].dtype == np.uint16:
return cls.step_kernelu16_u8(params, args, idx, input_tensor, qrec)
if args['i_state'][1].dtype == np.int16:
return cls.step_kernel16_8(params, args, idx, input_tensor, qrec)
return cls.step_kernel8_8(params, args, idx, input_tensor, qrec)
# NE16 8 bit kernel
    @ classmethod
    def step_kernelu8_u8(cls, params: LSTMParameters,
                         args: Mapping[str, np.ndarray],
                         idx: int,
                         input_tensor: np.ndarray,
                         qrec):
        """Execute one LSTM timestep for the NE16 uint8 kernel.

        Each gate accumulation subtracts the weight zero-point contribution,
        applies per-gate qbiases/qnorms scaling, adds the bias and then the
        recurrent contribution before the activation.  Updates
        args['c_state'][0] and args['i_state'][0] in place and returns
        (output, c_state) or (output, None) depending on
        params.lstm_output_c_state.

        NOTE(review): input_scratch[gate] is only seeded inside the
        idx < params.n_input_cells branch; a step without an input would
        raise KeyError in the recurrent loop — confirm callers guarantee
        an input at every step.
        """
        check_unsupported(args)
        r_pscales = qrec.cache['r_pscales']
        i_pscales = qrec.cache['i_pscales']
        input_scratch = {}
        if idx < params.n_input_cells:
            in_t = input_tensor[idx].astype(INT_DTYPE)
            for gate in ['i', 'f', 'c', 'o']:
                name = f'i_2_{gate}_w'
                # calculate weights zero point * input
                w_val = args[name][0].astype(INT_DTYPE)
                w_qtype = args[name][1]
                input_scratch[gate] = - \
                    np.sum(in_t * w_qtype.zero_point.astype(INT_DTYPE))
                # now calculate gate
                input_scratch[gate] = input_scratch[gate] + w_val.dot(in_t)
                DiagCollector.record(
                    f'input_{gate}_in_inputscale',
                    input_scratch[gate],
                    scale=i_pscales[gate],
                    node=params)
                # rescale to state scale via cached multiplier then shift
                input_scratch[gate] = input_scratch[gate] * \
                    qrec.cache[f'i_2_{gate}_q'].qbiases
                input_scratch[gate] = input_scratch[gate] + \
                    args[f'{gate}_b'][1].attr.interleaved_values[0]
                input_scratch[gate] = input_scratch[gate] >> qrec.cache[f'i_2_{gate}_q'].qnorms
                DiagCollector.record(
                    f'input_{gate}_in_statescale',
                    input_scratch[gate],
                    scale=r_pscales[gate],
                    node=params)
        state_t = args['i_state'][0].astype(INT_DTYPE)
        for gate in ['i', 'f', 'c', 'o']:
            name = f'r_2_{gate}_w'
            # calculate weights zero point * input
            w_val = args[name][0].astype(INT_DTYPE)
            w_qtype = args[name][1]
            input_scratch[gate] = input_scratch[gate] - \
                np.sum(state_t * w_qtype.zero_point.astype(INT_DTYPE))
            # now calculate gate
            input_scratch[gate] = input_scratch[gate] + w_val.dot(state_t)
            DiagCollector.record(
                f'state_{gate}_in_statescale',
                input_scratch[gate],
                scale=i_pscales[gate],
                node=params)
            # rescale to internal scale, add bias, then shift down
            input_scratch[gate] = input_scratch[gate] * \
                qrec.cache[f'r_2_{gate}_q'].qbiases
            input_scratch[gate] = input_scratch[gate] + \
                args[f'{gate}_b'][0].astype(INT_DTYPE)
            input_scratch[gate] = input_scratch[gate] >> qrec.cache[f'r_2_{gate}_q'].qnorms
            DiagCollector.record(
                f'state_{gate}_in_intscale',
                input_scratch[gate],
                scale=r_pscales['int_scale'],
                node=params)
        int_qtype = internal_qtype(qrec)
        # Apply activations
        for gate, activation in [('i', 'sigmoid'), ('f', 'sigmoid'), ('o', 'sigmoid'), ('c', 'tanh')]:
            input_scratch[gate] = get_activation(activation, params.hard_act)(
                input_scratch[gate], int_qtype)
            DiagCollector.record(
                f'{gate}_gate_after_act', input_scratch[gate],
                scale=r_pscales['act_out_scale'], node=params)
        # Q15 * c_state Q -> Q15
        cstate_cbar_f = args['c_state'][0].astype(INT_DTYPE) * input_scratch['f']
        DiagCollector.record(
            'cstate_cbar_f_prescale', cstate_cbar_f, node=params)
        cstate_cbar_f = scale_lstm_cellin(
            qrec,
            cstate_cbar_f,
            0)
        DiagCollector.record(
            'cstate_cbar_f', cstate_cbar_f,
            scale=r_pscales['act_out_scale'], node=params)
        # Q15 * Q15 -> Q15
        cstate_c_i = at_norm(
            input_scratch['c'] * input_scratch['i'], 15)
        DiagCollector.record(
            'cstate_c_i', cstate_c_i,
            scale=r_pscales['act_out_scale'], node=params)
        # Q15 + Q15
        cstate = cstate_cbar_f + cstate_c_i
        DiagCollector.record(
            'c_state_before_scale', cstate,
            scale=r_pscales['act_out_scale'], node=params)
        # Q15 -> Cell Out
        args['c_state'][0] = scale_lstm_cellout(qrec, cstate, 0)
        DiagCollector.record(
            'c_state_out', args['c_state'][0], scale=args['c_state'][1].scale, node=params)
        # Q15 -> Q12 -> Q15
        cell_scratch = get_activation('tanh', params.hard_act)(
            at_norm(cstate, 3), int_qtype)
        # Q15 * Q15 -> Q15
        input_scratch['o'] = at_norm((input_scratch['o'] * cell_scratch), 15)
        DiagCollector.record(
            'output_before_scale', input_scratch['o'], scale=r_pscales['act_out_scale'], node=params)
        # rescale to output qtype, re-add zero point and saturate to uint8
        output = np.clip(
            at_norm(
                input_scratch['o'] * qrec.cache['state_out_q'].qbiases,
                qrec.cache['state_out_q'].qnorms
            ) + qrec.out_qs[0].zero_point[0],
            0,
            0xff).astype(np.uint8)
        DiagCollector.record(
            'output', output, scale=qrec.out_qs[0].scale, node=params, zero_point=qrec.out_qs[0].zero_point[0])
        # args['i_state'][0] = qrec.scale_i_state(output_gate_scratch.copy(), 0, ktype="symmetric")
        args['i_state'][0] = output.copy()
        if params.lstm_output_c_state:
            return output, args['c_state'][0]
        return output, None
# NE16 16 bit kernel
# Difference with 8 bit kernel is that bias is streamed in separately for each gate and
# scaling is manual in software
# This is necessary to stop input and weights zero offset causing overflow
    @ classmethod
    def step_kernelu16_u8(cls, params: LSTMParameters,
                          args: Mapping[str, np.ndarray],
                          idx: int,
                          input_tensor: np.ndarray,
                          qrec):
        """Execute one LSTM timestep for the NE16 uint16 kernel.

        Differs from the uint8 kernel: the bias is folded in separately per
        pass (interleaved values on the input pass, raw bias on the state
        pass), an optional pre-normalization shift is applied before the
        qbiases/qnorms scaling to avoid overflow, and the input and state
        contributions are summed only at activation time.  Updates
        args['c_state'][0] and args['i_state'][0] in place and returns
        (output, c_state) or (output, None).

        NOTE(review): input_scratch[gate] is only seeded when
        idx < params.n_input_cells; the activation loop would raise
        KeyError otherwise — confirm callers guarantee an input per step.
        """
        check_unsupported(args)
        r_pscales = qrec.cache['r_pscales']
        i_pscales = qrec.cache['i_pscales']
        input_scratch = {}
        if idx < params.n_input_cells:
            in_t = input_tensor[idx].astype(INT_DTYPE)
            for gate in ['i', 'f', 'c', 'o']:
                name = f'i_2_{gate}_w'
                # calculate weights zero point * input
                w_val = args[name][0].astype(INT_DTYPE)
                w_qtype = args[name][1]
                input_scratch[gate] = args[f'{gate}_b'][1].attr.interleaved_values[0] - \
                    np.sum(in_t * w_qtype.zero_point.astype(INT_DTYPE))
                # now calculate gate
                input_scratch[gate] = input_scratch[gate] + w_val.dot(in_t)
                DiagCollector.record(
                    f'input_{gate}_in_inputscale',
                    input_scratch[gate],
                    scale=i_pscales[gate],
                    node=params)
                # pre-shift before scaling to keep the product in range
                if qrec.cache[f'i_2_{gate}_q'].pre_normalization > 0:
                    input_scratch[gate] = at_norm(
                        input_scratch[gate],
                        qrec.cache[f'i_2_{gate}_q'].pre_normalization)
                input_scratch[gate] = at_norm(
                    input_scratch[gate] * qrec.cache[f'i_2_{gate}_q'].qbiases,
                    qrec.cache[f'i_2_{gate}_q'].qnorms)
                DiagCollector.record(
                    f'input_{gate}_in_statescale',
                    input_scratch[gate],
                    scale=r_pscales['int_scale'],
                    node=params)
        state_t = args['i_state'][0].astype(INT_DTYPE)
        state_scratch = {}
        for gate in ['i', 'f', 'c', 'o']:
            name = f'r_2_{gate}_w'
            # calculate weights zero point * input
            w_val = args[name][0].astype(INT_DTYPE)
            w_qtype = args[name][1]
            state_scratch[gate] = args[f'{gate}_b'][0].astype(INT_DTYPE) - \
                np.sum(state_t * w_qtype.zero_point.astype(INT_DTYPE))
            # now calculate gate
            state_scratch[gate] = state_scratch[gate] + w_val.dot(state_t)
            DiagCollector.record(
                f'state_{gate}_in_statescale',
                state_scratch[gate],
                scale=i_pscales[gate],
                node=params)
            # pre-shift before scaling to keep the product in range
            if qrec.cache[f'r_2_{gate}_q'].pre_normalization > 0:
                state_scratch[gate] = at_norm(
                    state_scratch[gate],
                    qrec.cache[f'r_2_{gate}_q'].pre_normalization)
            state_scratch[gate] = at_norm(
                state_scratch[gate] * qrec.cache[f'r_2_{gate}_q'].qbiases,
                qrec.cache[f'r_2_{gate}_q'].qnorms)
            DiagCollector.record(
                f'state_{gate}_in_intscale',
                state_scratch[gate],
                scale=r_pscales['int_scale'],
                node=params)
        int_qtype = internal_qtype(qrec)
        # Apply activations
        for gate, activation in [('i', 'sigmoid'), ('f', 'sigmoid'), ('o', 'sigmoid'), ('c', 'tanh')]:
            input_scratch[gate] = get_activation(activation, params.hard_act)(
                input_scratch[gate] + state_scratch[gate], int_qtype)
            DiagCollector.record(
                f'{gate}_gate_after_act', input_scratch[gate],
                scale=r_pscales['act_out_scale'], node=params)
        # Q15 * c_state Q -> Q15
        cstate_cbar_f = args['c_state'][0].astype(INT_DTYPE) * input_scratch['f']
        DiagCollector.record(
            'cstate_cbar_f_prescale', cstate_cbar_f, node=params)
        # Note - There is a prenorm in 16 bit mode but it is done by apply_scales
        # cstate_cbar_f = at_norm(cstate_cbar_f, 8)
        cstate_cbar_f = scale_lstm_cellin(
            qrec,
            cstate_cbar_f,
            0)
        DiagCollector.record(
            'cstate_cbar_f', cstate_cbar_f,
            scale=r_pscales['act_out_scale'], node=params)
        # Q15 * Q15 -> Q15
        cstate_c_i = at_norm(
            input_scratch['c'] * input_scratch['i'], 15)
        DiagCollector.record(
            'cstate_c_i', cstate_c_i,
            scale=r_pscales['act_out_scale'], node=params)
        # Q15 + Q15
        cstate = cstate_cbar_f + cstate_c_i
        DiagCollector.record(
            'c_state_before_scale', cstate,
            scale=r_pscales['act_out_scale'], node=params)
        # Q15 -> Cell Out
        args['c_state'][0] = scale_lstm_cellout(qrec, cstate, 0)
        DiagCollector.record(
            'c_state_out', args['c_state'][0], scale=args['c_state'][1].scale, node=params)
        # Q15 -> Q12 -> Q15
        cell_scratch = get_activation('tanh', params.hard_act)(
            at_norm(cstate, 3), int_qtype)
        # Q15 * Q15 -> Q15
        input_scratch['o'] = at_norm((input_scratch['o'] * cell_scratch), 15)
        DiagCollector.record(
            'output_before_scale', input_scratch['o'], scale=r_pscales['act_out_scale'], node=params)
        # rescale to output qtype, re-add zero point and saturate to uint16
        output = np.clip(
            at_norm(
                input_scratch['o'] * qrec.cache['state_out_q'].qbiases,
                qrec.cache['state_out_q'].qnorms
            ) + qrec.out_qs[0].zero_point[0],
            0,
            0xffff).astype(np.uint16)
        DiagCollector.record(
            'output', output, scale=qrec.out_qs[0].scale, node=params, zero_point=qrec.out_qs[0].zero_point[0])
        # args['i_state'][0] = qrec.scale_i_state(output_gate_scratch.copy(), 0, ktype="symmetric")
        args['i_state'][0] = output.copy()
        if params.lstm_output_c_state:
            return output, args['c_state'][0]
        return output, None
    @ classmethod
    def step_kernel8_8(cls, params: LSTMParameters,
                       args: Mapping[str, np.ndarray],
                       idx: int,
                       input_tensor: np.ndarray,
                       qrec):
        """Execute one LSTM timestep for the 8-bit input / 8-bit state kernel.

        Accumulates per-gate input and recurrent products, rescales them
        to the internal qtype, applies sigmoid/tanh activations (hard or
        soft per params.hard_act) and updates cell and output state.
        Updates args['c_state'][0] and args['i_state'][0] in place and
        returns (output, c_state) or (output, None).

        NOTE(review): the cifg/peephole/layer-norm/projection checks below
        duplicate the module-level check_unsupported(); they are kept
        inline here because the projection check intentionally runs after
        the state updates — confirm before consolidating.
        """
        use_cifg = 'i_2_i_w' in args and args['i_2_i_w'][0] is None
        use_peephole = 'c_2_o_w' in args and args['c_2_o_w'][0] is not None
        use_layer_norm = 'f_norm' in args and args['f_norm'][0] is not None
        if use_cifg:
            raise NotImplementedError("cifg mode is not supported")
        if use_peephole:
            raise NotImplementedError("peephole mode is not supported")
        if use_layer_norm:
            raise NotImplementedError("layer norm mode is not supported")
        # scales = qrec.cache['scales']
        # DiagCollector.record(
        #     'input', input_tensor[idx], scale=scales['in'][0], node=params)
        # INPUT vs WEIGHTS
        # For each cell: compute input_weight * input if there is an input
        input_gate_scratch = np.full([params.n_states], 0, dtype=INT_DTYPE)
        forget_gate_scratch = np.full([params.n_states], 0, dtype=INT_DTYPE)
        cell_scratch = np.full([params.n_states], 0, dtype=INT_DTYPE)
        output_gate_scratch = np.full([params.n_states], 0, dtype=INT_DTYPE)
        DiagCollector.record(
            'i_state', args['i_state'][0], scale=args['i_state'][1].scale, node=params)
        DiagCollector.record(
            'c_state', args['c_state'][0], scale=args['c_state'][1].scale, node=params)
        DiagCollector.record(
            'input', input_tensor[idx], scale=qrec.in_qs[0].scale, node=params)
        r_pscales = qrec.cache['r_pscales']
        i_pscales = qrec.cache['i_pscales']
        if idx < params.n_input_cells:
            input_gate_scratch += scale_lstm_input_input(qrec,
                                                         args['i_2_i_w'][0].astype(INT_DTYPE).dot(
                                                             input_tensor[idx].astype(INT_DTYPE)),
                                                         0)
            forget_gate_scratch += scale_lstm_input_forget(qrec,
                                                           args['i_2_f_w'][0].astype(INT_DTYPE).dot(
                                                               input_tensor[idx].astype(INT_DTYPE)),
                                                           0)
            cell_scratch += scale_lstm_input_cell(qrec,
                                                  args['i_2_c_w'][0].astype(INT_DTYPE).dot(
                                                      input_tensor[idx].astype(INT_DTYPE)),
                                                  0)
            output_gate_scratch += scale_lstm_input_output(qrec,
                                                           args['i_2_o_w'][0].astype(INT_DTYPE).dot(
                                                               input_tensor[idx].astype(INT_DTYPE)),
                                                           0)
            DiagCollector.record(
                'i_gate_i', input_gate_scratch, scale=i_pscales['i'], node=params)
            DiagCollector.record(
                'f_gate_i', forget_gate_scratch, scale=i_pscales['f'], node=params)
            DiagCollector.record(
                'c_gate_i', cell_scratch, scale=i_pscales['c'], node=params)
            DiagCollector.record(
                'o_gate_i', output_gate_scratch, scale=i_pscales['o'], node=params)
        # perceptron scaling
        # 16 bit act(scale(scale(i*iw) + scale(r*rw) + b)) - internal max_accum
        # 8 bit different input + i_state
        # scale(scale(i*iw) + r*rw + b) internal max(sum(r*w))
        # 8 bit same input and i_state
        # scale(i*iw + r*rw + b) internal - no scaling
        # For each cell: compute recurrent_weight * output_state
        input_gate_scratch_state = args['r_2_i_w'][0].astype(
            INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
        forget_gate_scratch_state = args['r_2_f_w'][0].astype(
            INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
        cell_gate_scratch_state = args['r_2_c_w'][0].astype(
            INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
        output_gate_scratch_state = args['r_2_o_w'][0].astype(
            INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
        DiagCollector.record(
            'i_gate_r', input_gate_scratch_state, scale=r_pscales['i'], node=params)
        DiagCollector.record(
            'f_gate_r', forget_gate_scratch_state, scale=r_pscales['f'], node=params)
        DiagCollector.record(
            'c_gate_r', cell_gate_scratch_state, scale=r_pscales['c'], node=params)
        DiagCollector.record(
            'o_gate_r', output_gate_scratch_state, scale=r_pscales['o'], node=params)
        # Add bias for regular lstm
        input_gate_scratch += args['i_b'][0].astype(
            INT_DTYPE).copy() + input_gate_scratch_state
        forget_gate_scratch += args['f_b'][0].astype(
            INT_DTYPE).copy() + forget_gate_scratch_state
        cell_scratch += args['c_b'][0].astype(INT_DTYPE).copy() + \
            cell_gate_scratch_state
        output_gate_scratch += args['o_b'][0].astype(
            INT_DTYPE).copy() + output_gate_scratch_state
        DiagCollector.record(
            'i_gate_post_bias', input_gate_scratch, scale=r_pscales['i'], node=params)
        DiagCollector.record(
            'f_gate_post_bias', forget_gate_scratch, scale=r_pscales['f'], node=params)
        DiagCollector.record(
            'c_gate_post_bias', cell_scratch, scale=r_pscales['c'], node=params)
        DiagCollector.record(
            'o_gate_post_bias', output_gate_scratch, scale=r_pscales['o'], node=params)
        # rescale each gate from recurrent scale to the internal qtype
        input_gate_scratch = scale_lstm_istate_input(
            qrec, input_gate_scratch, 0)
        forget_gate_scratch = scale_lstm_istate_forget(
            qrec, forget_gate_scratch, 0)
        cell_scratch = scale_lstm_istate_cell(
            qrec, cell_scratch, 0)
        output_gate_scratch = scale_lstm_istate_output(
            qrec, output_gate_scratch, 0)
        int_qtype = internal_qtype(qrec)
        DiagCollector.record('i_gate', input_gate_scratch,
                             scale=r_pscales['int_scale'], node=params)
        DiagCollector.record('f_gate', forget_gate_scratch,
                             scale=r_pscales['int_scale'], node=params)
        DiagCollector.record('c_gate', cell_scratch,
                             scale=r_pscales['int_scale'], node=params)
        DiagCollector.record('o_gate', output_gate_scratch,
                             scale=r_pscales['int_scale'], node=params)
        # Apply activations in internal Q * 1
        input_gate_scratch = get_activation('sigmoid', params.hard_act)(
            input_gate_scratch, int_qtype)
        DiagCollector.record('i_gate_after_act', input_gate_scratch,
                             scale=r_pscales['act_out_scale'], node=params)
        forget_gate_scratch = get_activation('sigmoid', params.hard_act)(
            forget_gate_scratch, int_qtype)
        DiagCollector.record('f_gate_after_act', forget_gate_scratch,
                             scale=r_pscales['act_out_scale'], node=params)
        output_gate_scratch = get_activation('sigmoid', params.hard_act)(
            output_gate_scratch, int_qtype)
        DiagCollector.record('o_gate_after_act', output_gate_scratch,
                             scale=r_pscales['act_out_scale'], node=params)
        cell_scratch = get_activation('tanh', params.hard_act)(
            cell_scratch, int_qtype)
        DiagCollector.record('c_gate_after_act', cell_scratch,
                             scale=r_pscales['act_out_scale'], node=params)
        # cstate = cstate * Of + Og * Oi
        if params.hard_act:
            # Scale cell state * Of to internal Q * 2
            cstate = scale_lstm_cellin(
                qrec, args['c_state'][0].astype(INT_DTYPE) * forget_gate_scratch, 0)
            DiagCollector.record('cstate_cbar_f', cstate,
                                 scale=r_pscales['c_before_scale'], node=params)
            cstate_c_i = cell_scratch * input_gate_scratch
            DiagCollector.record('cstate_c_i', cstate_c_i,
                                 scale=r_pscales['c_before_scale'], node=params)
            cstate += cstate_c_i
            DiagCollector.record('c_state_before_scale', cstate,
                                 scale=r_pscales['c_before_scale'], node=params)
            # cstate now in (2 * Q) * 1
        else:
            # Multiply cstate [Scstate] * Of [Sq15] and scale to [Sq12]
            # Multiply Og [Sq15] * Oi [Sq15] --> [Sq30] >> 30-12 --> [Sq12]
            # cstate is now in q12 = internal_qtype
            cstate_cbar_f = scale_lstm_cellin(
                qrec,
                args['c_state'][0].astype(INT_DTYPE) * forget_gate_scratch,
                0)
            DiagCollector.record('cstate_cbar_f', cstate_cbar_f,
                                 scale=int_qtype.scale, node=params)
            cstate_c_i = at_norm(
                cell_scratch * input_gate_scratch,
                (30-int_qtype.q))
            DiagCollector.record('cstate_c_i', cstate_c_i,
                                 scale=int_qtype.scale, node=params)
            cstate = cstate_cbar_f + cstate_c_i
            DiagCollector.record('c_state_before_scale', cstate,
                                 scale=int_qtype.scale, node=params)
        # if params.cell_clip > 0.0:
        #     args['c_state'] = abs_clip(args['c_state'], params.cell_clip)
        # if there is a clip value this should override the min max here
        # clip here
        args['c_state'][0] = scale_lstm_cellout(qrec, cstate, 0)
        DiagCollector.record(
            'c_state_out', args['c_state'][0], scale=args['c_state'][1].scale, node=params)
        if params.hard_act:
            # hard tanh expects the doubled internal Q of cstate
            two_qtype = QType.Pow2(
                int_qtype.bits, int_qtype.q * 2, True)
            cell_scratch = get_activation(
                'tanh', params.hard_act)(cstate, two_qtype)
            # Assume scaling from internalq * 3 -> Q7 * 1
            output_gate_scratch *= cell_scratch
        else:
            cell_scratch = get_activation('tanh', params.hard_act)(
                cstate, int_qtype)
            # output = Og[Sq15] * tanh(cell_scratch)[Sq15] -> [Sq30] >> 15 -> [Sq15]
            output_gate_scratch = (output_gate_scratch * cell_scratch) >> 15
        output = scale_lstm_output(qrec, output_gate_scratch, 0)
        DiagCollector.record(
            'output', output, scale=qrec.out_qs[0].scale, node=params)
        use_projection_weight = 'proj_w' in args and args['proj_w'][0] is not None
        use_projection_bias = 'proj_b' in args and args['proj_b'][0] is not None
        if use_projection_weight or use_projection_bias:
            raise NotImplementedError("LSTMP is not yet supported by kernel")
        # args['i_state'][0] = qrec.scale_i_state(output_gate_scratch.copy(), 0, ktype="symmetric")
        args['i_state'][0] = output.copy()
        if params.lstm_output_c_state:
            return output, args['c_state'][0]
        return output, None
    @classmethod
    def step_kernel16_8(cls, params: LSTMParameters,
                        args: Mapping[str, np.ndarray],
                        idx: int,
                        input_tensor: np.ndarray,
                        qrec):
        """Execute one LSTM timestep for the 16-bit state / 8-bit weight kernel.

        Accumulates per-gate input and recurrent products in dicts keyed by
        gate letter, rescales each via the cached per-gate qtypes, adds the
        bias and applies soft activations (hard_act is never used here —
        get_activation is always called with False).  Updates
        args['c_state'][0] and args['i_state'][0] in place and returns
        (output, c_state) or (output, None).

        NOTE(review): the inline cifg/peephole/layer-norm checks duplicate
        the module-level check_unsupported(); projection is checked after
        state updates — confirm ordering matters before consolidating.
        """
        use_cifg = 'i_2_i_w' in args and args['i_2_i_w'][0] is None
        use_peephole = 'c_2_o_w' in args and args['c_2_o_w'][0] is not None
        use_layer_norm = 'f_norm' in args and args['f_norm'][0] is not None
        if use_cifg:
            raise NotImplementedError("cifg mode is not supported")
        if use_peephole:
            raise NotImplementedError("peephole mode is not supported")
        if use_layer_norm:
            raise NotImplementedError("layer norm mode is not supported")
        # INPUT vs WEIGHTS
        # For each cell: compute input_weight * input if there is an input
        input_scratch = {k: np.full([params.n_states], 0, dtype=INT_DTYPE)
                         for k in ['i', 'f', 'c', 'o']}
        DiagCollector.record(
            'i_state', args['i_state'][0], scale=args['i_state'][1].scale, node=params)
        DiagCollector.record(
            'c_state', args['c_state'][0], scale=args['c_state'][1].scale, node=params)
        DiagCollector.record(
            'input', input_tensor[idx], scale=qrec.in_qs[0].scale, node=params)
        r_pscales = qrec.cache['r_pscales']
        i_pscales = qrec.cache['i_pscales']
        int_qtype = internal_qtype(qrec)
        if idx < params.n_input_cells:
            for k in ['i', 'f', 'c', 'o']:
                input_scratch[k] += args[f'i_2_{k}_w'][0].astype(INT_DTYPE).dot(
                    input_tensor[idx].astype(INT_DTYPE))
                input_scratch[k] = scale_to(
                    qrec, f'i_2_{k}_q', input_scratch[k], 0)
                DiagCollector.record(
                    f'{k}_gate_i', input_scratch[k], scale=int_qtype.scale, node=params)
        state_scratch = {}
        for k in ['i', 'f', 'c', 'o']:
            state_scratch[k] = args[f'r_2_{k}_w'][0].astype(
                INT_DTYPE).dot(args['i_state'][0].astype(INT_DTYPE))
            state_scratch[k] = scale_to(
                qrec, f'r_2_{k}_q', state_scratch[k], 0)
            DiagCollector.record(
                f'{k}_gate_r', state_scratch[k], scale=int_qtype.scale, node=params)
        for k in ['i', 'f', 'c', 'o']:
            # Add bias for regular lstm
            input_scratch[k] += args[f'{k}_b'][0].astype(
                INT_DTYPE).copy() + state_scratch[k]
            DiagCollector.record(f'{k}_gate', input_scratch[k],
                                 scale=int_qtype.scale, node=params)
            DiagCollector.record_ref(
                f'{k}_gate_post_bias', f'{k}_gate', node=params)
        # Apply activations in internal Q * 1
        for k in ['i', 'f', 'o']:
            input_scratch[k] = get_activation('sigmoid', False)(
                input_scratch[k], int_qtype)
            DiagCollector.record(f'{k}_gate_after_act', input_scratch[k],
                                 scale=r_pscales['act_out_scale'], node=params)
        input_scratch['c'] = get_activation('tanh', False)(
            input_scratch['c'], int_qtype)
        DiagCollector.record('c_gate_after_act', input_scratch['c'],
                             scale=r_pscales['act_out_scale'], node=params)
        # Multiply cstate [Scstate] * Of [Sq15] and scale to [Sq12]
        # Multiply Og [Sq15] * Oi [Sq15] --> [Sq30] >> 30-12 --> [Sq12]
        # cstate is now in q12 = internal_qtype
        # NOTE: for int16 scale apply 8 bit norm to product before mult_bias then norm - 8 in kernel
        # this is done by prenormalization in scaled qtype set by quantizer
        cstate_cbar_f = scale_lstm_cellin(
            qrec,
            args['c_state'][0].astype(INT_DTYPE) * input_scratch['f'],
            0)
        DiagCollector.record('cstate_cbar_f', cstate_cbar_f,
                             scale=int_qtype.scale, node=params)
        cstate_c_i = at_norm(
            input_scratch['c'] * input_scratch['i'],
            (30-int_qtype.q))
        DiagCollector.record('cstate_c_i', cstate_c_i,
                             scale=int_qtype.scale, node=params)
        cstate = cstate_cbar_f + cstate_c_i
        DiagCollector.record('c_state_before_scale', cstate,
                             scale=int_qtype.scale, node=params)
        # if params.cell_clip > 0.0:
        #     args['c_state'] = abs_clip(args['c_state'], params.cell_clip)
        # if there is a clip value this should override the min max here
        # clip here
        args['c_state'][0] = scale_lstm_cellout(qrec, cstate, 0)
        DiagCollector.record(
            'c_state_out', args['c_state'][0], scale=args['c_state'][1].scale, node=params)
        cell_gate_scratch = get_activation('tanh', False)(
            cstate, int_qtype)
        output_gate_scratch = (input_scratch['o'] * cell_gate_scratch)
        output = scale_lstm_output(qrec, output_gate_scratch, 0)
        DiagCollector.record(
            'output', output, scale=qrec.out_qs[0].scale, node=params)
        use_projection_weight = 'proj_w' in args and args['proj_w'][0] is not None
        use_projection_bias = 'proj_b' in args and args['proj_b'][0] is not None
        if use_projection_weight or use_projection_bias:
            raise NotImplementedError("LSTMP is not yet supported by kernel")
        # args['i_state'][0] = qrec.scale_i_state(output_gate_scratch.copy(), 0, ktype="symmetric")
        args['i_state'][0] = output.copy()
        if params.lstm_output_c_state:
            return output, args['c_state'][0]
        return output, None
| 45.091748
| 152
| 0.574931
| 11,477
| 87,974
| 4.115361
| 0.042607
| 0.072197
| 0.032012
| 0.023819
| 0.857173
| 0.808964
| 0.768822
| 0.735921
| 0.699399
| 0.659405
| 0
| 0.019875
| 0.309125
| 87,974
| 1,950
| 153
| 45.114872
| 0.757235
| 0.141189
| 0
| 0.603093
| 0
| 0
| 0.085045
| 0.007507
| 0
| 0
| 0.000771
| 0.000513
| 0.002946
| 1
| 0.041237
| false
| 0
| 0.008837
| 0.021355
| 0.105302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
02d074f29e2685e52a1553fff48a7416d38a7bdb
| 81,535
|
py
|
Python
|
dlpy/caffe_models/model_resnet101.py
|
jld23/python-dlpy
|
39fe417a02da8f40975691392f5735fe02160da0
|
[
"Apache-2.0"
] | null | null | null |
dlpy/caffe_models/model_resnet101.py
|
jld23/python-dlpy
|
39fe417a02da8f40975691392f5735fe02160da0
|
[
"Apache-2.0"
] | null | null | null |
dlpy/caffe_models/model_resnet101.py
|
jld23/python-dlpy
|
39fe417a02da8f40975691392f5735fe02160da0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..utils import input_table_check
def ResNet101_Model(s, model_table='RESNET101', n_channels=3, width=224, height=224,
random_crop=None, offsets=None,
random_flip=None, random_mutation=None):
'''
ResNet101 model definition
Parameters
----------
s : CAS
Specifies the CAS connection object
model_table : string, dict or CAS table, optional
Specifies the CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels of the input layer
Default: 3
width : int, optional
Specifies the width of the input layer
Default: 224
height : int, optional
Specifies the height of the input layer
Default: 224
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
        specified offsets.
Default: (103.939, 116.779, 123.68)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
Returns
-------
None
A CAS table defining the model is created
'''
model_table_opts = input_table_check(model_table)
# quick error-checking and default setting
# let the server check the error and default
#if random_crop is None:
# random_crop = 'none'
#elif random_crop.lower() not in ['none', 'unique']:
# raise ValueError('random_crop can only be "none" or "unique"')
if offsets is None:
offsets = [103.939, 116.779, 123.68]
# instantiate model
s.deepLearn.buildModel(model=dict(replace=True, **model_table_opts), type='CNN')
# input layer
# to keep back compatible with the older VDMML, check random_flip and random_mutation first
s.deepLearn.addLayer(model=model_table_opts, name='data',
layer=dict(type='input', nchannels=n_channels, width=width, height=height,
randomcrop=random_crop, offsets=offsets,
randomFlip=random_flip, randomMutation=random_mutation))
# -------------------- Layer 1 ----------------------
# conv1 layer: 64 channels, 7x7 conv, stride=2; output = 112 x 112 */
s.deepLearn.addLayer(model=model_table_opts, name='conv1',
layer=dict(type='convolution', nFilters=64, width=7, height=7,
stride=2, act='identity'),
srcLayers=['data'])
# conv1 batch norm layer: 64 channels, output = 112 x 112 */
s.deepLearn.addLayer(model=model_table_opts, name='bn_conv1',
layer=dict(type='batchnorm', act='relu'), srcLayers=['conv1'])
# pool1 layer: 64 channels, 3x3 pooling, output = 56 x 56 */
s.deepLearn.addLayer(model=model_table_opts, name='pool1',
layer=dict(type='pooling', width=3, height=3, stride=2, pool='max'),
srcLayers=['bn_conv1'])
# ------------------- Residual Layer 2A -----------------------
# res2a_branch1 layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch1',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch1 batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch1'])
# res2a_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2a'])
# res2a_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2a'])
# res2a_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2b'])
# res2a_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2b'])
# res2a_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch2c'])
# res2a residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2a_branch2c', 'bn2a_branch1'])
# ------------------- Residual Layer 2B -----------------------
# res2b_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2a'])
# res2b_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2a'])
# res2b_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2a'])
# res2b_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2b'])
# res2b_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2b'])
# res2b_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2b_branch2c'])
# res2b residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2b_branch2c', 'res2a'])
# ------------------- Residual Layer 2C -----------------------
# res2c_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2b'])
# res2c_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2a'])
# res2c_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2a'])
# res2c_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2b'])
# res2c_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2b'])
# res2c_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2c_branch2c'])
# res2c residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2c_branch2c', 'res2b'])
# ------------- Layer 3A --------------------
# res3a_branch1 layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch1',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch1 batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch1'])
# res3a_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2a'])
# res3a_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2a'])
# res3a_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2b'])
# res3a_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2b'])
# res3a_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch2c'])
# res3a residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3a_branch2c', 'bn3a_branch1'])
# ------------------- Residual Layer 3B1 -----------------------
# res3b1_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3a'])
# res3b1_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b1_branch2a'])
# res3b1_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b1_branch2a'])
# res3b1_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b1_branch2b'])
# res3b1_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b1_branch2b'])
# res3b1_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b1_branch2c'])
# res3b1 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b1_branch2c', 'res3a'])
# ------------------- Residual Layer 3B2 -----------------------
# res3b2_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b1'])
# res3b2_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b2_branch2a'])
# res3b2_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b2_branch2a'])
# res3b2_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b2_branch2b'])
# res3b2_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b2_branch2b'])
# res3b2_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b2_branch2c'])
# res3b2 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b2_branch2c', 'res3b1'])
# ------------------- Residual Layer 3B3 -----------------------
# res3b3_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b2'])
# res3b3_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b3_branch2a'])
# res3b3_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b3_branch2a'])
# res3b3_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b3_branch2b'])
# res3b3_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b3_branch2b'])
# res3b3_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b3_branch2c'])
# res3b3 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b3_branch2c', 'res3b2'])
# ------------- Layer 4A --------------------
# res4a_branch1 layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch1',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3b3'])
# res4a_branch1 batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch1'])
# res4a_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3b3'])
# res4a_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2a'])
# res4a_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2a'])
# res4a_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2b'])
# res4a_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2b'])
# res4a_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch2c'])
# res4a residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4a_branch2c', 'bn4a_branch1'])
# ------------------- Residual Layer 4B1 -----------------------
# res4b1_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4a'])
# res4b1_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b1_branch2a'])
# res4b1_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b1_branch2a'])
# res4b1_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b1_branch2b'])
# res4b1_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b1_branch2b'])
# res4b1_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b1_branch2c'])
# res4b1 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b1_branch2c', 'res4a'])
# ------------------- Residual Layer 4B2 -----------------------
# res4b2_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b1'])
# res4b2_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b2_branch2a'])
# res4b2_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b2_branch2a'])
# res4b2_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b2_branch2b'])
# res4b2_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b2_branch2b'])
# res4b2_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b2_branch2c'])
# res4b2 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b2_branch2c', 'res4b1'])
# ------------------- Residual Layer 4B3 -----------------------
# res4b3_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b2'])
# res4b3_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b3_branch2a'])
# res4b3_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b3_branch2a'])
# res4b3_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b3_branch2b'])
# res4b3_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b3_branch2b'])
# res4b3_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b3_branch2c'])
# res4b3 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b3_branch2c', 'res4b2'])
# ------------------- Residual Layer 4B4 ----------------------- */
# res4b4_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b3'])
# res4b4_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b4_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b4_branch2a'])
# res4b4_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b4_branch2a'])
# res4b4_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b4_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b4_branch2b'])
# res4b4_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b4_branch2b'])
# res4b4_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b4_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b4_branch2c'])
# res4b4 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b4',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b4_branch2c', 'res4b3'])
# ------------------- Residual Layer 4B5 -----------------------
# res4b5_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b4'])
# res4b5_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b5_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b5_branch2a'])
# res4b5_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b5_branch2a'])
# res4b5_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b5_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b5_branch2b'])
# res4b5_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b5_branch2b'])
# res4b5_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b5_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b5_branch2c'])
# res4b5 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b5',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b5_branch2c', 'res4b4'])
# ------------------- Residual Layer 4B6 -----------------------
# res4b6_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b5'])
# res4b6_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b6_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b6_branch2a'])
# res4b6_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b6_branch2a'])
# res4b6_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b6_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b6_branch2b'])
# res4b6_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b6_branch2b'])
# res4b6_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b6_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b6_branch2c'])
# res4b6 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b6',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b6_branch2c', 'res4b5'])
# ------------------- Residual Layer 4B7 -----------------------
# res4b7_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b6'])
# res4b7_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b7_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b7_branch2a'])
# res4b7_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b7_branch2a'])
# res4b7_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b7_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b7_branch2b'])
# res4b7_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b7_branch2b'])
# res4b7_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b7_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b7_branch2c'])
# res4b7 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b7',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b7_branch2c', 'res4b6'])
# ------------------- Residual Layer 4B8 -----------------------
# res4b8_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b7'])
# res4b8_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b8_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b8_branch2a'])
# res4b8_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b8_branch2a'])
# res4b8_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b8_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b8_branch2b'])
# res4b8_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b8_branch2b'])
# res4b8_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b8_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b8_branch2c'])
# res4b8 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b8',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b8_branch2c', 'res4b7'])
# ------------------- Residual Layer 4B9 -----------------------
# res4b9_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b8'])
# res4b9_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b9_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b9_branch2a'])
# res4b9_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b9_branch2a'])
# res4b9_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b9_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b9_branch2b'])
# res4b9_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b9_branch2b'])
# res4b9_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b9_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b9_branch2c'])
# res4b9 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b9',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b9_branch2c', 'res4b8'])
# ------------------- Residual Layer 4B10 -----------------------
# res4b10_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b9'])
# res4b10_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b10_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b10_branch2a'])
# res4b10_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b10_branch2a'])
# res4b10_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b10_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b10_branch2b'])
# res4b10_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b10_branch2b'])
# res4b10_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b10_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b10_branch2c'])
# res4b10 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b10',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b10_branch2c', 'res4b9'])
# ------------------- Residual Layer 4B11 -----------------------
# res4b11_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b10'])
# res4b11_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b11_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b11_branch2a'])
# res4b11_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b11_branch2a'])
# res4b11_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b11_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b11_branch2b'])
# res4b11_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b11_branch2b'])
# res4b11_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b11_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b11_branch2c'])
# res4b11 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b11',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b11_branch2c', 'res4b10'])
# ------------------- Residual Layer 4B12 -----------------------
# res4b12_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b11'])
# res4b12_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b12_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b12_branch2a'])
# res4b12_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b12_branch2a'])
# res4b12_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b12_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b12_branch2b'])
# res4b12_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b12_branch2b'])
# res4b12_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b12_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b12_branch2c'])
# res4b12 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b12',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b12_branch2c', 'res4b11'])
# ------------------- Residual Layer 4B13 -----------------------
# res4b13_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b12'])
# res4b13_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b13_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b13_branch2a'])
# res4b13_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b13_branch2a'])
# res4b13_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b13_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b13_branch2b'])
# res4b13_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b13_branch2b'])
# res4b13_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b13_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b13_branch2c'])
# res4b13 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b13',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b13_branch2c', 'res4b12'])
# ------------------- Residual Layer 4B14 -----------------------
# res4b14_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b13'])
# res4b14_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b14_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b14_branch2a'])
# res4b14_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b14_branch2a'])
# res4b14_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b14_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b14_branch2b'])
# res4b14_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b14_branch2b'])
# res4b14_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b14_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b14_branch2c'])
# res4b14 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b14',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b14_branch2c', 'res4b13'])
# ------------------- Residual Layer 4B15 -----------------------
# res4b15_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b14'])
# res4b15_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b15_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b15_branch2a'])
# res4b15_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b15_branch2a'])
# res4b15_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b15_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b15_branch2b'])
# res4b15_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b15_branch2b'])
# res4b15_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b15_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b15_branch2c'])
# res4b15 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b15',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b15_branch2c', 'res4b14'])
# ------------------- Residual Layer 4B16 -----------------------
# res4b16_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b15'])
# res4b16_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b16_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b16_branch2a'])
# res4b16_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b16_branch2a'])
# res4b16_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b16_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b16_branch2b'])
# res4b16_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b16_branch2b'])
# res4b16_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b16_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b16_branch2c'])
# res4b16 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b16',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b16_branch2c', 'res4b15'])
# ------------------- Residual Layer 4B17 -----------------------
# res4b17_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b16'])
# res4b17_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b17_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b17_branch2a'])
# res4b17_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b17_branch2a'])
# res4b17_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b17_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b17_branch2b'])
# res4b17_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b17_branch2b'])
# res4b17_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b17_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b17_branch2c'])
# res4b17 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b17',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b17_branch2c', 'res4b16'])
# ------------------- Residual Layer 4B18 -----------------------
# res4b18_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b17'])
# res4b18_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b18_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b18_branch2a'])
# res4b18_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b18_branch2a'])
# res4b18_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b18_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b18_branch2b'])
# res4b18_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b18_branch2b'])
# res4b18_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b18_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b18_branch2c'])
# res4b18 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b18',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b18_branch2c', 'res4b17'])
# ------------------- Residual Layer 4B19 -----------------------
# res4b19_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b18'])
# res4b19_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b19_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b19_branch2a'])
# res4b19_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b19_branch2a'])
# res4b19_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b19_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b19_branch2b'])
# res4b19_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b19_branch2b'])
# res4b19_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b19_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b19_branch2c'])
# res4b19 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b19',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b19_branch2c', 'res4b18'])
# ------------------- Residual Layer 4B20 -----------------------
# res4b20_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b19'])
# res4b20_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b20_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b20_branch2a'])
# res4b20_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b20_branch2a'])
# res4b20_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b20_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b20_branch2b'])
# res4b20_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b20_branch2b'])
# res4b20_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b20_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b20_branch2c'])
# res4b20 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b20',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b20_branch2c', 'res4b19'])
# ------------------- Residual Layer 4B21 -----------------------
# res4b21_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b20'])
# res4b21_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b21_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b21_branch2a'])
# res4b21_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b21_branch2a'])
# res4b21_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b21_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b21_branch2b'])
# res4b21_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b21_branch2b'])
# res4b21_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b21_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b21_branch2c'])
# res4b21 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b21',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b21_branch2c', 'res4b20'])
# ------------------- Residual Layer 4B22 -----------------------
# res4b22_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b21'])
# res4b22_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b22_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b22_branch2a'])
# res4b22_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b22_branch2a'])
# res4b22_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b22_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b22_branch2b'])
# res4b22_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b22_branch2b'])
# res4b22_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b22_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b22_branch2c'])
# res4b22 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b22',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b22_branch2c', 'res4b21'])
    # ------------------- Layer 5A -----------------------
# res5a_branch1 layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch1',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4b22'])
# res5a_branch1 batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch1'])
# res5a_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4b22'])
# res5a_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2a'])
# res5a_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2a'])
# res5a_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2b'])
# res5a_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2b'])
# res5a_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch2c'])
# res5a residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5a_branch2c', 'bn5a_branch1'])
# ------------------- Residual Layer 5B -----------------------
# res5b_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5a'])
# res5b_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2a'])
# res5b_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2a'])
# res5b_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2b'])
# res5b_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2b'])
# res5b_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5b_branch2c'])
# res5b residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5b_branch2c', 'res5a'])
# ------------------- Residual Layer 5C -----------------------
# res5c_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5b'])
# res5c_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2a'])
# res5c_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2a'])
# res5c_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2b'])
# res5c_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2b'])
# res5c_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5c_branch2c'])
# res5c residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5c_branch2c', 'res5b'])
# ------------------- final layers ----------------------
# pool5 layer: 2048 channels, 7x7 pooling, output = 1 x 1
kernel_width = width // 2 // 2 // 2 // 2 // 2
kernel_height = height // 2 // 2 // 2 // 2 // 2
stride = kernel_width
s.deepLearn.addLayer(model=model_table_opts, name='pool5',
layer=dict(type='pooling', width=kernel_width,
height=kernel_height, stride=stride, pool='mean'),
srcLayers=['res5c'])
    # fc1000 output layer: 1000 neurons
s.deepLearn.addLayer(model=model_table_opts, name='fc1000',
layer=dict(type='output', n=1000, act='softmax'),
srcLayers=['pool5'])
return s.CASTable(**model_table_opts)
| 54.758227
| 99
| 0.576746
| 8,862
| 81,535
| 5.176935
| 0.038592
| 0.05471
| 0.075679
| 0.122826
| 0.85165
| 0.847727
| 0.844283
| 0.81599
| 0.689764
| 0.689764
| 0
| 0.086485
| 0.296327
| 81,535
| 1,488
| 100
| 54.795027
| 0.713145
| 0.245686
| 0
| 0.410798
| 0
| 0
| 0.173575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001174
| false
| 0
| 0.001174
| 0
| 0.003521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
02ff7a449413ae6b7930ff759f53c073de3c0027
| 24
|
py
|
Python
|
main.py
|
ualikhansars/python_features
|
92d477d588da8310fcb3c8aafb9e251d80adc1e5
|
[
"MIT"
] | null | null | null |
main.py
|
ualikhansars/python_features
|
92d477d588da8310fcb3c8aafb9e251d80adc1e5
|
[
"MIT"
] | null | null | null |
main.py
|
ualikhansars/python_features
|
92d477d588da8310fcb3c8aafb9e251d80adc1e5
|
[
"MIT"
] | null | null | null |
print('Python features')
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 24
| 1
| 24
| 24
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b8458099682ef5848084222d925f4b3d5ee7958c
| 14,366
|
py
|
Python
|
script.module.exodus/lib/resources/lib/sources/pl/alltube.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:37:15.000Z
|
2019-03-05T09:37:15.000Z
|
script.module.exodus/lib/resources/lib/sources/pl/alltube.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
script.module.exodus/lib/resources/lib/sources/pl/alltube.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2017 homik
Based on MrKnow fanfilm addon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urlparse, json, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
def byteify(input):
    """Recursively convert unicode strings inside *input* to UTF-8 byte strings.

    Dicts and lists are rebuilt with every key/value/element converted; any
    other value is returned unchanged. (Python 2 helper: relies on the py2-only
    names `unicode` and `dict.iteritems`.)
    """
    if isinstance(input, dict):
        converted = {}
        for key, value in input.iteritems():
            converted[byteify(key)] = byteify(value)
        return converted
    if isinstance(input, list):
        return [byteify(element) for element in input]
    if isinstance(input, unicode):
        return input.encode('utf-8')
    return input
class source:
    """Exodus content provider for alltube.tv (Polish streaming site).

    Implements the standard Exodus scraper interface: movie()/tvshow()
    locate a title page, episode() narrows it to an episode page,
    sources() lists hoster links and resolve() turns one of them into a
    playable iframe URL. (Python 2 code: uses `urlparse`, the `exec`
    statement and `str.decode('base64')`.)
    """
    def __init__(self):
        # Provider metadata read by the Exodus framework.
        self.priority = 1
        self.language = ['pl']
        self.domains = ['alltube.tv']
        # Base site URL and URL templates used by the methods below.
        self.base_link = 'http://alltube.tv'
        self.search_link = '/szukaj'
        self.moviesearch_link = '/index.php?url=search/autocomplete/&phrase=%s'
        self.tvsearch_cache = 'http://alltube.tv/seriale-online/'
        self.episode_link = '-Season-%01d-Episode-%01d'
    def get_rows(self, r, search_type):
        # From the search results page `r`, pick the section whose headline
        # equals `search_type` ('Filmy' or 'Seriale') and return its result
        # items; returns None implicitly when no section matches.
        divs = client.parseDOM(r, 'div', attrs={'class': 'col-sm-12'})
        for div in divs:
            header = client.parseDOM(div, 'h2', attrs={'class': 'headline'})
            if header and header[0] == search_type:
                return client.parseDOM(div, 'div', attrs={'class': 'item-block clearfix'})
    def name_matches(self, names, names_found):
        # True when any normalized candidate title occurs among the titles
        # scraped from a result row.
        for name in names:
            if name in names_found:
                return True
        return False
    def try_read_year(self, url):
        # Result URLs may carry a year right before the last '/': return
        # those 4 characters if they are all digits, else None.
        index = url.rfind('/')
        found_year = url[index - 4:index]
        if found_year.isdigit():
            return found_year
        return None
    def search(self, title, localtitle, year, search_type):
        # POST the title to the site search and return the URL of the first
        # row whose (original or localized) title matches and whose year,
        # when readable from the URL, equals `year`. None on any failure.
        try:
            r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'search': cleantitle.query(title)})
            r = self.get_rows(r, search_type)
            names = [cleantitle.get(i) for i in [title, localtitle]]
            for row in r:
                url = client.parseDOM(row, 'a', ret='href')[0]
                names_found = client.parseDOM(row, 'h3')[0]
                # Skip trailer rows ('Zwiastun' = trailer in Polish) unless a
                # trailer was explicitly requested.
                if names_found.startswith('Zwiastun') and not localtitle.startswith('Zwiastun'):
                    continue
                # Rows may list several alternative titles separated by '/'.
                names_found = names_found.split('/')
                names_found = [cleantitle.get(i) for i in names_found]
                if self.name_matches(names, names_found):
                    found_year = self.try_read_year(url)
                    if not found_year or found_year == year:
                        return url
        except:
            return
    def movie(self, imdb, title, localtitle, aliases, year):
        # Movies live under the 'Filmy' section of the search results.
        return self.search(title, localtitle, year, 'Filmy')
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # TV shows live under the 'Seriale' section of the search results.
        return self.search(tvshowtitle, localtvshowtitle, year, 'Seriale')
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # On the show page, find the episode-list entry containing the
        # 'sXXeYY' tag and return its link. None on any failure.
        try:
            if url == None: return
            txts = 's%02de%02d' % (int(season), int(episode))
            result = client.request(url)
            result = client.parseDOM(result, 'li', attrs={'class': 'episode'})
            result = [i for i in result if txts in i][0]
            url = client.parseDOM(result, 'a', ret='href')[0]
            url = url.encode('utf-8')
            return url
        except:
            return
    def get_language_by_type(self, lang_type):
        # Map the site's version tag to (language, info). 'Napisy' (subs),
        # 'Lektor' (voice-over) and 'Dubbing' are Polish with the tag kept
        # as extra info; plain 'PL' is Polish; anything else is English.
        if lang_type in ['Napisy', 'Lektor', 'Dubbing']:
            return 'pl', lang_type
        if lang_type == 'PL':
            return 'pl', None
        return 'en', None
    def sources(self, url, hostDict, hostprDict):
        # Scrape the title page's hoster table: each <tr> yields the iframe
        # token (data-iframe), the hoster name (img alt) and the language
        # tag. The token is appended to the page URL after '?' so that
        # resolve() can split it back out later.
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
            links = client.parseDOM(result, 'tr')
            links = [(client.parseDOM(i, 'a', attrs={'class': 'watch'}, ret='data-iframe')[0],
                      client.parseDOM(i, 'img', ret='alt')[0],
                      client.parseDOM(i, 'td', attrs={'class':'text-center'})[0]) for i in links]
            for i in links:
                try:
                    url1 = '%s?%s' % (url, i[0])
                    url1 = url1.encode('utf-8')
                    language, info = self.get_language_by_type(i[2]);
                    sources.append({'source': i[1].encode('utf-8'), 'quality': 'SD', 'language': language, 'url': url1, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # Turn a sources() entry back into a playable iframe URL. The site
        # requires a 'tmvh' anti-bot cookie: /jsverify.php returns key/hash
        # arrays which are concatenated (sorted by key) and digested by an
        # obfuscated hash function shipped below as base64-encoded Python
        # source (the decoded code defines `abc(in_abc)` built from ff/gg/
        # hh/ii rounds -- it appears to be an MD5 implementation, not
        # verified here).
        try:
            myurl = url.split('?')
            mycookie = client.request(myurl[0], output='cookie', error=True)
            tmp = 'ZGVmIGFiYyhpbl9hYmMpOg0KICAgIGRlZiByaGV4KGEpOg0KICAgICAgICBoZXhfY2hyID0gJzAxMjM0NTY3ODlhYmNkZWYnDQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKDQpOg0KICAgICAgICAgICAgcmV0ICs9IGhleF9jaHJbKGEgPj4gKGkgKiA4ICsgNCkpICYgMHgwRl0gKyBoZXhfY2hyWyhhID4+IChpICogOCkpICYgMHgwRl0NCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiBoZXgodGV4dCk6DQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKGxlbih0ZXh0KSk6DQogICAgICAgICAgICByZXQgKz0gcmhleCh0ZXh0W2ldKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGFkZDMyKGEsIGIpOg0KICAgICAgICByZXR1cm4gKGEgKyBiKSAmIDB4RkZGRkZGRkYNCiAgICBkZWYgY21uKGEsIGIsIGMsIGQsIGUsIGYpOg0KICAgICAgICBiID0gYWRkMzIoYWRkMzIoYiwgYSksIGFkZDMyKGQsIGYpKTsNCiAgICAgICAgcmV0dXJuIGFkZDMyKChiIDw8IGUpIHwgKGIgPj4gKDMyIC0gZSkpLCBjKQ0KICAgIGRlZiBmZihhLCBiLCBjLCBkLCBlLCBmLCBnKToNCiAgICAgICAgcmV0dXJuIGNtbigoYiAmIGMpIHwgKCh+YikgJiBkKSwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgZ2coYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oKGIgJiBkKSB8IChjICYgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGhoKGEsIGIsIGMsIGQsIGUsIGYsIGcpOg0KICAgICAgICByZXR1cm4gY21uKGIgXiBjIF4gZCwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgaWkoYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oYyBeIChiIHwgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGNyeXB0Y3ljbGUodGFiQSwgdGFiQik6DQogICAgICAgIGEgPSB0YWJBWzBdDQogICAgICAgIGIgPSB0YWJBWzFdDQogICAgICAgIGMgPSB0YWJBWzJdDQogICAgICAgIGQgPSB0YWJBWzNdDQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzBdLCA3LCAtNjgwODc2OTM2KTsNCiAgICAgICAgZCA9IGZmKGQsIGEsIGIsIGMsIHRhYkJbMV0sIDEyLCAtMzg5NTY0NTg2KTsNCiAgICAgICAgYyA9IGZmKGMsIGQsIGEsIGIsIHRhYkJbMl0sIDE3LCA2MDYxMDU4MTkpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlszXSwgMjIsIC0xMDQ0NTI1MzMwKTsNCiAgICAgICAgYSA9IGZmKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDcsIC0xNzY0MTg4OTcpOw0KICAgICAgICBkID0gZmYoZCwgYSwgYiwgYywgdGFiQls1XSwgMTIsIDEyMDAwODA0MjYpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQls2XSwgMTcsIC0xNDczMjMxMzQxKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbN10sIDIyLCAtNDU3MDU5ODMpOw0KICAgICAgICBhID0gZmYoYSwgYiwgYywgZCwgdGFiQls4XSwgNywgMTc3MDAzNTQxNik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzldLCAxMiwgLTE5NTg0MTQ0MTcpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE3LCAtNDIwNjMpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlsxMV0sIDIyLCAtMTk5MDQwNDE2Mik7DQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNywgMTgwNDYwMzY4Mik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzEzXSwgMTIsIC00MDM0MTEwMSk7DQogICAgICAgIGMgPSBmZihjLCBkLCBhLCBiLCB0YWJCWzE0XSwgMTcsIC0xNTAyMDAyMjkwKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbMTVdLCAyMiwgMTIzNjUzNTMyOSk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzFdLCA1LCAtMTY1Nzk2NTEwKTsNCiAgICAgICAgZCA9IGdnKGQsIGEsIGIsIGMsIHRhYkJbNl0sIDksIC0xMDY5NTAxNjMyKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTFdLCAxNCwgNjQzNzE3NzEzKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbMF0sIDIwLCAtMzczODk3MzAyKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbNV0sIDUsIC03MDE1NTg2OTEpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxMF0sIDksIDM4MDE2MDgzKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTVdLCAxNCwgLTY2MDQ3ODMzNSk7DQogICAgICAgIGIgPSBnZyhiLCBjLCBkLCBhLCB0YWJCWzRdLCAyMCwgLTQwNTUzNzg0OCk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzldLCA1LCA1Njg0NDY0MzgpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxNF0sIDksIC0xMDE5ODAzNjkwKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE0LCAtMTg3MzYzOTYxKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbOF0sIDIwLCAxMTYzNTMxNTAxKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbMTNdLCA1LCAtMTQ0NDY4MTQ2Nyk7DQogICAgICAgIGQgPSBnZyhkLCBhLCBiLCBjLCB0YWJCWzJdLCA5LCAtNTE0MDM3ODQpOw0KICAgICAgICBjID0gZ2coYywgZCwgYSwgYiwgdGFiQls3XSwgMTQsIDE3MzUzMjg0NzMpOw0KICAgICAgICBiID0gZ2coYiwgYywgZCwgYSwgdGFiQlsxMl0sIDIwLCAtMTkyNjYwNzczNCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzVdLCA0LCAtMzc4NTU4KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbOF0sIDExLCAtMjAyMjU3NDQ2Myk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzExXSwgMTYsIDE4MzkwMzA1NjIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxNF0sIDIzLCAtMzUzMDk1NTYpOw0KICAgICAgICBhID0gaGgoYSwgYiwgYywgZCwgdGFiQlsxXSwgNCwgLTE1MzA5OTIwNjApOw0KICAgICAgICBkID0gaGgoZCwgYSwgYiwgYywgdGFiQls0XSwgMTEsIDEyNzI4OTMzNTMpOw0KICAgICAgICBjID0gaGgoYywgZCwgYSwgYiwgdGFiQls3XSwgMTYsIC0xNTU0OTc2MzIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxMF0sIDIzLCAtMTA5NDczMDY0MCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzEzXSwgNCwgNjgxMjc5MTc0KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMF0sIDExLCAtMzU4NTM3MjIyKTsNCiAgICAgICAgYyA9IGhoKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE2LCAtNzIyNTIxOTc5KTsNCiAgICAgICAgYiA9IGhoKGIsIGMsIGQsIGEsIHRhYkJbNl0sIDIzLCA3NjAyOTE4OSk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzldLCA0LCAtNjQwMzY0NDg3KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMTJdLCAxMSwgLTQyMTgxNTgzNSk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzE1XSwgMTYsIDUzMDc0MjUyMCk7DQogICAgICAgIGIgPSBoaChiLCBjLCBkLCBhLCB0YWJCWzJdLCAyMywgLTk5NTMzODY1MSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzBdLCA2LCAtMTk4NjMwODQ0KTsNCiAgICAgICAgZCA9IGlpKGQsIGEsIGIsIGMsIHRhYkJbN10sIDEwLCAxMTI2ODkxNDE1KTsNCiAgICAgICAgYyA9IGlpKGMsIGQsIGEsIGIsIHRhYkJbMTRdLCAxNSwgLTE0MTYzNTQ5MDUpOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQls1XSwgMjEsIC01NzQzNDA1NSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNiwgMTcwMDQ4NTU3MSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzNdLCAxMCwgLTE4OTQ5ODY2MDYpOw0KICAgICAgICBjID0gaWkoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE1LCAtMTA1MTUyMyk7DQogICAgICAgIGIgPSBpaShiLCBjLCBkLCBhLCB0YWJCWzFdLCAyMSwgLTIwNTQ5MjI3OTkpOw0KICAgICAgICBhID0gaWkoYSwgYiwgYywgZCwgdGFiQls4XSwgNiwgMTg3MzMxMzM1OSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzE1XSwgMTAsIC0zMDYxMTc0NCk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzZdLCAxNSwgLTE1NjAxOTgzODApOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQlsxM10sIDIxLCAxMzA5MTUxNjQ5KTsNCiAgICAgICAgYSA9IGlpKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDYsIC0xNDU1MjMwNzApOw0KICAgICAgICBkID0gaWkoZCwgYSwgYiwgYywgdGFiQlsxMV0sIDEwLCAtMTEyMDIxMDM3OSk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzJdLCAxNSwgNzE4Nzg3MjU5KTsNCiAgICAgICAgYiA9IGlpKGIsIGMsIGQsIGEsIHRhYkJbOV0sIDIxLCAtMzQzNDg1NTUxKTsNCiAgICAgICAgdGFiQVswXSA9IGFkZDMyKGEsIHRhYkFbMF0pOw0KICAgICAgICB0YWJBWzFdID0gYWRkMzIoYiwgdGFiQVsxXSk7DQogICAgICAgIHRhYkFbMl0gPSBhZGQzMihjLCB0YWJBWzJdKTsNCiAgICAgICAgdGFiQVszXSA9IGFkZDMyKGQsIHRhYkFbM10pDQogICAgZGVmIGNyeXB0YmxrKHRleHQpOg0KICAgICAgICByZXQgPSBbXQ0KICAgICAgICBmb3IgaSBpbiByYW5nZSgwLCA2NCwgNCk6DQogICAgICAgICAgICByZXQuYXBwZW5kKG9yZCh0ZXh0W2ldKSArIChvcmQodGV4dFtpKzFdKSA8PCA4KSArIChvcmQodGV4dFtpKzJdKSA8PCAxNikgKyAob3JkKHRleHRbaSszXSkgPDwgMjQpKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGpjc3lzKHRleHQpOg0KICAgICAgICB0eHQgPSAnJzsNCiAgICAgICAgdHh0TGVuID0gbGVuKHRleHQpDQogICAgICAgIHJldCA9IFsxNzMyNTg0MTkzLCAtMjcxNzMzODc5LCAtMTczMjU4NDE5NCwgMjcxNzMzODc4XQ0KICAgICAgICBpID0gNjQNCiAgICAgICAgd2hpbGUgaSA8PSBsZW4odGV4dCk6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgY3J5cHRibGsodGV4dFsnc3Vic3RyaW5nJ10oaSAtIDY0LCBpKSkpDQogICAgICAgICAgICBpICs9IDY0DQogICAgICAgIHRleHQgPSB0ZXh0W2kgLSA2NDpdDQogICAgICAgIHRtcCA9IFswLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwXQ0KICAgICAgICBpID0gMA0KICAgICAgICB3aGlsZSBpIDwgbGVuKHRleHQpOg0KICAgICAgICAgICAgdG1wW2kgPj4gMl0gfD0gb3JkKHRleHRbaV0pIDw8ICgoaSAlIDQpIDw8IDMpDQogICAgICAgICAgICBpICs9IDENCiAgICAgICAgdG1wW2kgPj4gMl0gfD0gMHg4MCA8PCAoKGkgJSA0KSA8PCAzKQ0KICAgICAgICBpZiBpID4gNTU6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgICAgIGZvciBpIGluIHJhbmdlKDE2KToNCiAgICAgICAgICAgICAgICB0bXBbaV0gPSAwDQogICAgICAgIHRtcFsxNF0gPSB0eHRMZW4gKiA4Ow0KICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiByZXplZG93YSh0ZXh0KToNCiAgICAgICAgcmV0dXJuIGhleChqY3N5cyh0ZXh0KSkNCiAgICByZXR1cm4gcmV6ZWRvd2EoaW5fYWJjKQ0K'
            tmp = base64.b64decode(tmp)
            # Compile and exec the decoded helper in a restricted namespace;
            # it binds the digest function to the local name 'abc'.
            _myFun = compile(tmp, '', 'exec')
            vGlobals = {"__builtins__": None, 'len': len, 'list': list, 'ord': ord, 'range': range}
            vLocals = {'abc': ''}
            exec _myFun in vGlobals, vLocals
            myFun1 = vLocals['abc']
            data = client.request(urlparse.urljoin(self.base_link, '/jsverify.php?op=tag'), cookie=mycookie)
            data = byteify(json.loads(data))
            # Pair up the 'key' and 'hash' arrays, then concatenate the hash
            # fragments in key order to form the digest input.
            d = {}
            for i in range(len(data['key'])):
                d[data['key'][i]] = data['hash'][i]
            tmp = ''
            for k in sorted(d.keys()):
                tmp += d[k]
            mycookie = 'tmvh=%s;%s' % (myFun1(tmp), mycookie)
            # The part after '?' is the base64-encoded hoster iframe URL
            # (Python 2 str.decode('base64')); the width/height query args
            # mimic the site's player request.
            link = client.request(myurl[-1].decode('base64') + '&width=673&height=471.09999999999997', cookie=mycookie)
            match = re.search('<iframe src="(.+?)"', link)
            if match:
                linkVideo = match.group(1)
                return linkVideo
            return
        except:
            return
| 77.236559
| 7,544
| 0.782194
| 798
| 14,366
| 14.011278
| 0.33208
| 0.013773
| 0.00322
| 0.005098
| 0.038011
| 0.015741
| 0.007155
| 0
| 0
| 0
| 0
| 0.045293
| 0.159335
| 14,366
| 185
| 7,545
| 77.654054
| 0.880517
| 0.001462
| 0
| 0.15625
| 0
| 0
| 0.594398
| 0.560947
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0.007813
| 0.023438
| null | null | 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b8776b9b64125926c03f3623ac5a7c7cc61ed3f1
| 149
|
py
|
Python
|
src/sfilter/tools/radon.py
|
alex-d-bondarev/sfilter
|
900000d5cf8afbedc36b1e75ed8c4ea416540403
|
[
"MIT"
] | null | null | null |
src/sfilter/tools/radon.py
|
alex-d-bondarev/sfilter
|
900000d5cf8afbedc36b1e75ed8c4ea416540403
|
[
"MIT"
] | 39
|
2021-08-08T18:16:52.000Z
|
2021-12-26T15:16:04.000Z
|
src/sfilter/tools/radon.py
|
alex-d-bondarev/sfilter
|
900000d5cf8afbedc36b1e75ed8c4ea416540403
|
[
"MIT"
] | null | null | null |
"""
Call radon mi() command
"""
import radon.cli as cli
def run_radon(dir_path, output_file="radon.json"):
    """Run radon's Maintainability Index analysis over a path.

    Args:
        dir_path: Directory (or file) path handed to radon's `mi` command.
        output_file: Destination for the JSON report. Defaults to the
            previously hard-coded "radon.json", so existing callers are
            unaffected.
    """
    cli.mi(paths=[dir_path], json=True, output_file=output_file)
| 16.555556
| 65
| 0.691275
| 25
| 149
| 3.96
| 0.64
| 0.141414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14094
| 149
| 8
| 66
| 18.625
| 0.773438
| 0.154362
| 0
| 0
| 0
| 0
| 0.084746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b89fbc986a5779ff719772e36f2540ae4c71968d
| 3,120
|
py
|
Python
|
utils/sampler.py
|
BehroozRazeghi/Variational-Nested-Leakage
|
e92bf430c4d874edfe6ecf45c33363baccd0206e
|
[
"MIT"
] | 1
|
2021-06-11T16:12:58.000Z
|
2021-06-11T16:12:58.000Z
|
utils/sampler.py
|
BehroozRazeghi/Variational-Nested-Leakage
|
e92bf430c4d874edfe6ecf45c33363baccd0206e
|
[
"MIT"
] | null | null | null |
utils/sampler.py
|
BehroozRazeghi/Variational-Nested-Leakage
|
e92bf430c4d874edfe6ecf45c33363baccd0206e
|
[
"MIT"
] | 2
|
2021-12-08T21:49:21.000Z
|
2022-01-05T18:42:24.000Z
|
import numpy as np
import random
from math import *
from chainer import Variable
def onehot_categorical(batchsize, num_labels):
    """Sample `batchsize` one-hot float32 vectors of length `num_labels`.

    Each row has a single 1.0 at a uniformly random label index and 0.0
    elsewhere. Returns an array of shape (batchsize, num_labels).
    """
    y = np.zeros((batchsize, num_labels), dtype=np.float32)
    indices = np.random.randint(0, num_labels, batchsize)
    # Vectorized one-hot assignment replaces the original per-row Python loop.
    y[np.arange(batchsize), indices] = 1
    return y
def uniform(batchsize, ndim, minv=-1, maxv=1):
    """Draw a (batchsize, ndim) float32 sample from U[minv, maxv)."""
    draw = np.random.uniform(minv, maxv, (batchsize, ndim))
    return draw.astype(np.float32)
def gaussian(batchsize, ndim, mean=0, var=1):
    """Draw a (batchsize, ndim) float32 sample from a normal distribution.

    NOTE(review): `var` is passed straight to np.random.normal as the
    *scale* (standard deviation), not the variance -- confirm callers'
    intent before renaming or squaring.
    """
    draw = np.random.normal(mean, var, (batchsize, ndim))
    return draw.astype(np.float32)
def gaussian_mixture(batchsize, ndim, num_labels):
    """Sample (batchsize, ndim) float32 points from a 2-D Gaussian mixture.

    Each consecutive pair of dimensions is an anisotropic Gaussian blob
    rotated to a uniformly random label's angle and shifted outward along
    that direction. Raises if `ndim` is odd.
    """
    if ndim % 2 != 0:
        raise Exception("ndim must be a multiple of 2.")

    def place_on_ring(px, py, label, n_labels):
        # Rotate the blob to the label's angle and push it 1.4 units out.
        angle = 2.0 * np.pi / float(n_labels) * float(label)
        rotated_x = px * cos(angle) - py * sin(angle) + 1.4 * cos(angle)
        rotated_y = px * sin(angle) + py * cos(angle) + 1.4 * sin(angle)
        return np.array([rotated_x, rotated_y]).reshape((2,))

    # Wide along x, narrow along y before rotation.
    xs = np.random.normal(0, 0.5, (batchsize, ndim // 2))
    ys = np.random.normal(0, 0.05, (batchsize, ndim // 2))
    out = np.empty((batchsize, ndim), dtype=np.float32)
    for row in range(batchsize):
        for pair in range(ndim // 2):
            out[row, 2 * pair:2 * pair + 2] = place_on_ring(
                xs[row, pair], ys[row, pair],
                random.randint(0, num_labels - 1), num_labels)
    return out
def supervised_gaussian_mixture(batchsize, ndim, label_indices, num_labels):
    """Sample (batchsize, ndim) float32 Gaussian-mixture points with given labels.

    Like gaussian_mixture(), but row `b` uses `label_indices[b]` instead of
    a random label, so each sample lands in its assigned component.
    Raises if `ndim` is odd.
    """
    if ndim % 2 != 0:
        raise Exception("ndim must be a multiple of 2.")

    def place_on_ring(px, py, label, n_labels):
        # Rotate the blob to the label's angle and push it 1.4 units out.
        angle = 2.0 * np.pi / float(n_labels) * float(label)
        rotated_x = px * cos(angle) - py * sin(angle) + 1.4 * cos(angle)
        rotated_y = px * sin(angle) + py * cos(angle) + 1.4 * sin(angle)
        return np.array([rotated_x, rotated_y]).reshape((2,))

    # Wide along x, narrow along y before rotation.
    xs = np.random.normal(0, 0.5, (batchsize, ndim // 2))
    ys = np.random.normal(0, 0.05, (batchsize, ndim // 2))
    out = np.empty((batchsize, ndim), dtype=np.float32)
    for row in range(batchsize):
        for pair in range(ndim // 2):
            out[row, 2 * pair:2 * pair + 2] = place_on_ring(
                xs[row, pair], ys[row, pair], label_indices[row], num_labels)
    return out
def swiss_roll(batchsize, ndim, num_labels):
    """Sample (batchsize, ndim) float32 points along a 2-D swiss-roll spiral.

    Each consecutive pair of dimensions is a point on the spiral within the
    arc belonging to a uniformly random label.
    """
    def spiral_point(label, n_labels):
        # Uniform position inside the label's slice of [0, 1).
        t = np.random.uniform(0.0, 1.0) / float(n_labels) + float(label) / float(n_labels)
        radius = sqrt(t) * 3.0
        theta = np.pi * 4.0 * sqrt(t)
        return np.array([radius * cos(theta), radius * sin(theta)]).reshape((2,))

    out = np.zeros((batchsize, ndim), dtype=np.float32)
    for row in range(batchsize):
        for pair in range(ndim // 2):
            out[row, 2 * pair:2 * pair + 2] = spiral_point(
                random.randint(0, num_labels - 1), num_labels)
    return out
def supervised_swiss_roll(batchsize, ndim, label_indices, num_labels):
    """Sample (batchsize, ndim) float32 swiss-roll points with given labels.

    Like swiss_roll(), but row `b` uses `label_indices[b]` instead of a
    random label, so each pair lands in its assigned arc of the spiral.
    """
    def spiral_point(label, n_labels):
        # Uniform position inside the label's slice of [0, 1).
        t = np.random.uniform(0.0, 1.0) / float(n_labels) + float(label) / float(n_labels)
        radius = sqrt(t) * 3.0
        theta = np.pi * 4.0 * sqrt(t)
        return np.array([radius * cos(theta), radius * sin(theta)]).reshape((2,))

    out = np.zeros((batchsize, ndim), dtype=np.float32)
    for row in range(batchsize):
        for pair in range(ndim // 2):
            out[row, 2 * pair:2 * pair + 2] = spiral_point(label_indices[row], num_labels)
    return out
| 33.548387
| 108
| 0.654808
| 562
| 3,120
| 3.539146
| 0.129893
| 0.104072
| 0.042232
| 0.03821
| 0.810458
| 0.797386
| 0.772247
| 0.699849
| 0.699849
| 0.699849
| 0
| 0.0357
| 0.174038
| 3,120
| 93
| 109
| 33.548387
| 0.736127
| 0
| 0
| 0.725
| 0
| 0
| 0.018584
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1375
| false
| 0
| 0.05
| 0.025
| 0.325
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b8b8100e44468273836832ab816fb591585466c1
| 190
|
py
|
Python
|
core/apps/kubeops_api/cis_thread.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2021-04-01T04:14:43.000Z
|
2021-04-01T04:14:43.000Z
|
core/apps/kubeops_api/cis_thread.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:29:37.000Z
|
2022-03-02T09:29:37.000Z
|
core/apps/kubeops_api/cis_thread.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2020-07-06T04:53:51.000Z
|
2020-07-06T04:53:51.000Z
|
import threading
class CisThread(threading.Thread):
    """Thread that runs a zero-argument callable supplied at construction."""

    def __init__(self, func):
        super(CisThread, self).__init__()
        # Callable invoked (with no arguments) when the thread executes.
        self.func = func

    def run(self):
        self.func()
| 15.833333
| 39
| 0.631579
| 22
| 190
| 5.090909
| 0.454545
| 0.214286
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 190
| 11
| 40
| 17.272727
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b214ca4ebe44850f2d43c5df125140290e2e0145
| 23
|
py
|
Python
|
python/ql/test/query-tests/Imports/PyCheckerTests/pkg_ok/foo3.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 4,036
|
2020-04-29T00:09:57.000Z
|
2022-03-31T14:16:38.000Z
|
python/ql/test/query-tests/Imports/PyCheckerTests/pkg_ok/foo3.py
|
vadi2/codeql
|
a806a4f08696d241ab295a286999251b56a6860c
|
[
"MIT"
] | 2,970
|
2020-04-28T17:24:18.000Z
|
2022-03-31T22:40:46.000Z
|
python/ql/test/query-tests/Imports/PyCheckerTests/pkg_ok/foo3.py
|
ScriptBox99/github-codeql
|
2ecf0d3264db8fb4904b2056964da469372a235c
|
[
"MIT"
] | 794
|
2020-04-29T00:28:25.000Z
|
2022-03-30T08:21:46.000Z
|
class Foo3:
    """Empty placeholder class (analysis-test fixture with no behavior)."""
| 7.666667
| 13
| 0.565217
| 3
| 23
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.304348
| 23
| 2
| 14
| 11.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b223e0b54431e34f7a3dcf401a7a628a3bff1994
| 43
|
py
|
Python
|
all_repos_depends/errors.py
|
mxr/all-repos-depends
|
dcf715dbfb7182899e2412dbfaaf1ef4cc50865c
|
[
"MIT"
] | 11
|
2018-04-23T06:41:55.000Z
|
2022-01-27T13:37:59.000Z
|
all_repos_depends/errors.py
|
mxr/all-repos-depends
|
dcf715dbfb7182899e2412dbfaaf1ef4cc50865c
|
[
"MIT"
] | 2
|
2018-04-23T06:03:18.000Z
|
2018-04-23T06:03:51.000Z
|
all_repos_depends/errors.py
|
mxr/all-repos-depends
|
dcf715dbfb7182899e2412dbfaaf1ef4cc50865c
|
[
"MIT"
] | 2
|
2021-02-01T15:02:14.000Z
|
2021-09-25T15:49:44.000Z
|
class DependsError(RuntimeError):
    """Marker error type; an otherwise-empty RuntimeError subclass."""
| 14.333333
| 33
| 0.767442
| 4
| 43
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 34
| 21.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b2426c678b15b95671b93ff5dfb2bc305b4a0bf4
| 130
|
py
|
Python
|
python/triton/ops/__init__.py
|
daadaada/triton
|
e5248b655b237f26b8134b9cad08de41fb885fb1
|
[
"MIT"
] | null | null | null |
python/triton/ops/__init__.py
|
daadaada/triton
|
e5248b655b237f26b8134b9cad08de41fb885fb1
|
[
"MIT"
] | null | null | null |
python/triton/ops/__init__.py
|
daadaada/triton
|
e5248b655b237f26b8134b9cad08de41fb885fb1
|
[
"MIT"
] | null | null | null |
from .conv import _conv, conv
from .matmul import _matmul, matmul
from .softmax import _softmax, softmax
from . import blocksparse
| 32.5
| 38
| 0.807692
| 18
| 130
| 5.666667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 130
| 4
| 39
| 32.5
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b275366f0995f4f8f3befbbcc9db785b6fe39257
| 72
|
py
|
Python
|
fe621/tree_pricing/trinomial/__init__.py
|
rukmal/FE-621-Homework
|
9c7cef7931b58aed54867acd8e8cf1928bc6d2dd
|
[
"MIT"
] | 4
|
2020-04-29T04:34:50.000Z
|
2021-11-11T07:49:08.000Z
|
fe621/tree_pricing/trinomial/__init__.py
|
rukmal/FE-621-Homework
|
9c7cef7931b58aed54867acd8e8cf1928bc6d2dd
|
[
"MIT"
] | null | null | null |
fe621/tree_pricing/trinomial/__init__.py
|
rukmal/FE-621-Homework
|
9c7cef7931b58aed54867acd8e8cf1928bc6d2dd
|
[
"MIT"
] | 1
|
2020-04-23T07:32:44.000Z
|
2020-04-23T07:32:44.000Z
|
from .trinomial_price import TrinomialAdditivePriceTree as AdditiveTree
| 36
| 71
| 0.902778
| 7
| 72
| 9.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 72
| 1
| 72
| 72
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a24302426c238309c3ef21af9b143626bc87fe27
| 6,488
|
py
|
Python
|
tests/test_cli.py
|
PDOK/geopackage-validator
|
2bf4933f62376b552bc86dda9fb50c901b5294b3
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
PDOK/geopackage-validator
|
2bf4933f62376b552bc86dda9fb50c901b5294b3
|
[
"MIT"
] | 33
|
2020-08-10T15:49:57.000Z
|
2022-03-29T13:05:47.000Z
|
tests/test_cli.py
|
PDOK/geopackage-validator
|
2bf4933f62376b552bc86dda9fb50c901b5294b3
|
[
"MIT"
] | null | null | null |
import json
from click.testing import CliRunner
from geopackage_validator.cli import cli
from geopackage_validator import __version__
def test_show_validations():
    """`show-validations` exits cleanly and lists the RQ1 rule text."""
    outcome = CliRunner().invoke(cli, ["show-validations"])
    assert outcome.exit_code == 0
    rq1_text = 'RQ1": "Layer names must start with a letter, and valid characters are lowercase a-z, numbers or underscores."'
    assert rq1_text in outcome.output
def test_generate_definitions_no_gpkg():
    """Without a gpkg path or S3 location the command fails with a hint."""
    outcome = CliRunner().invoke(cli, ["generate-definitions"])
    assert outcome.exit_code == 1
    assert "Give --gpkg-path or s3 location" in outcome.output
def test_generate_definitions_error_s3():
    """An S3 endpoint given without credentials is rejected."""
    args = ["generate-definitions", "--s3-endpoint-no-protocol", "s3host"]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 1
    assert "S3 access key has to be given" in outcome.output
def test_generate_definitions_with_gpkg():
    """Definitions generated for a valid gpkg match the known schema."""
    args = ["generate-definitions", "--gpkg-path", "tests/data/test_allcorrect.gpkg"]
    outcome = CliRunner().invoke(cli, args)
    expected_columns = [
        {"name": "fid", "type": "INTEGER"},
        {"name": "geom", "type": "POLYGON"},
    ]
    expected = {
        "geopackage_validator_version": __version__,
        "projection": 28992,
        "tables": [
            {
                "name": "test_allcorrect",
                "geometry_column": "geom",
                "columns": expected_columns,
            }
        ],
    }
    assert outcome.exit_code == 0
    assert json.loads(outcome.output) == expected
def test_generate_definitions_with_ndimension_geometries():
    """Each layer of the multi-dimension gpkg gets its own table entry.

    The original spelled out five byte-identical table dicts; they are
    built in a comprehension here since only the layer name differs.
    """
    runner = CliRunner()
    result = runner.invoke(
        cli, ["generate-definitions", "--gpkg-path", "tests/data/test_dimensions.gpkg"]
    )
    # Order matters: it must match the CLI's layer enumeration order.
    table_names = [
        "test_dimensions",
        "test_dimensions3",
        "test_dimensions4",
        "test_dimensions4_correct",
        "test_dimensions3_correct",
    ]
    expected = {
        "geopackage_validator_version": __version__,
        "projection": 28992,
        "tables": [
            {
                "name": name,
                "geometry_column": "geom",
                "columns": [
                    {"name": "fid", "type": "INTEGER"},
                    {"name": "geom", "type": "POLYGON"},
                ],
            }
            for name in table_names
        ],
    }
    assert result.exit_code == 0
    assert json.loads(result.output) == expected
EXPECTED_VALIDATION_YAML = """geopackage_validator_version: {version}
projection: 28992
tables:
- name: test_allcorrect
geometry_column: geom
columns:
- name: fid
type: INTEGER
- name: geom
type: POLYGON"""
def test_generate_definitions_with_gpkg_yaml_output():
    """--yaml switches the definitions report to YAML format."""
    args = [
        "generate-definitions",
        "--gpkg-path",
        "tests/data/test_allcorrect.gpkg",
        "--yaml",
    ]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0
    expected = EXPECTED_VALIDATION_YAML.format(version=__version__)
    assert outcome.output.strip("\n") == expected
def test_validate_no_gpkg():
    """`validate` without a gpkg path or S3 location fails with a hint."""
    outcome = CliRunner().invoke(cli, ["validate"])
    assert outcome.exit_code == 1
    assert "Give --gpkg-path or s3 location" in outcome.output
def test_validate_error_s3():
    """`validate` rejects an S3 endpoint given without credentials."""
    args = ["validate", "--s3-endpoint-no-protocol", "s3host"]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 1
    assert "S3 access key has to be given" in outcome.output
def test_validate_with_gpkg():
    """A fully correct gpkg validates successfully."""
    args = ["validate", "--gpkg-path", "tests/data/test_allcorrect.gpkg"]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0
    assert '"geopackage_validator_version": ' in outcome.output
    assert '"success": true' in outcome.output
def test_validate_with_rq8_missing_definitions_path():
    """Requesting RQ8 without --table-definitions-path reports the missing input."""
    args = [
        "validate",
        "--gpkg-path",
        "tests/data/test_allcorrect.gpkg",
        "--validations",
        "RQ8",
    ]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0
    assert "Missing '--table-definitions-path' input" in outcome.output
def test_validate_with_rq8_with_yaml_definitions_path():
    """RQ8 runs when the table definitions are supplied as YAML."""
    args = [
        "validate",
        "--gpkg-path",
        "tests/data/test_allcorrect.gpkg",
        "--table-definitions-path",
        "tests/data/test_allcorrect_definition.yml",
    ]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0
    assert "RQ8" in outcome.output
def test_validate_with_rq8_with_json_definitions_path():
    """RQ8 runs when the table definitions are supplied as JSON."""
    args = [
        "validate",
        "--gpkg-path",
        "tests/data/test_allcorrect.gpkg",
        "--table-definitions-path",
        "tests/data/test_allcorrect_definition.json",
    ]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0
    assert "RQ8" in outcome.output
def test_validate_with_rq8_with_old_definitions_path():
    """RQ8 still accepts the legacy table-definitions file format."""
    args = [
        "validate",
        "--gpkg-path",
        "tests/data/test_allcorrect.gpkg",
        "--table-definitions-path",
        "tests/data/test_allcorrect_old_definition.json",
    ]
    outcome = CliRunner().invoke(cli, args)
    assert outcome.exit_code == 0
    assert "RQ8" in outcome.output
| 28.08658
| 119
| 0.543619
| 621
| 6,488
| 5.462158
| 0.1562
| 0.049528
| 0.080483
| 0.103479
| 0.849057
| 0.840212
| 0.785377
| 0.757665
| 0.697229
| 0.666863
| 0
| 0.011633
| 0.324291
| 6,488
| 230
| 120
| 28.208696
| 0.762089
| 0
| 0
| 0.563452
| 0
| 0.005076
| 0.294236
| 0.106196
| 0
| 0
| 0
| 0
| 0.137056
| 1
| 0.06599
| false
| 0
| 0.020305
| 0
| 0.086294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a250d594c98a1cc01ee1c05cb11b2e55f4c316b1
| 622
|
py
|
Python
|
stomp_message_broker.py
|
MarkAufdencamp/stomp-client-daemon
|
d40c4a953abfb78dc02fad94593a6e08debbcd37
|
[
"Apache-2.0"
] | 3
|
2015-08-03T00:58:32.000Z
|
2018-10-31T06:33:15.000Z
|
stomp_message_broker.py
|
MarkAufdencamp/stomp-client-daemon
|
d40c4a953abfb78dc02fad94593a6e08debbcd37
|
[
"Apache-2.0"
] | null | null | null |
stomp_message_broker.py
|
MarkAufdencamp/stomp-client-daemon
|
d40c4a953abfb78dc02fad94593a6e08debbcd37
|
[
"Apache-2.0"
] | null | null | null |
# StompMessageBroker is a proxy class of StompDaemonConnection with only a sendMessage method
# This exposes a simple interface for a StompMessageController method to communicate via the broker
class StompMessageBroker():
    """Thin proxy over a StompDaemonConnection.

    Exposes just enough surface (sendMessage / brokerId) for a
    StompMessageController handler to talk to the broker without holding
    the full daemon connection.
    """

    def __init__(self, stomp_daemon_connection):
        # Underlying connection object; must expose .stompConn.send() and
        # .msgSrvrClientId (per the usages below).
        self.stomp_daemon_connection = stomp_daemon_connection

    def sendMessage(self, message, queue):
        """Send `message` to `queue` via the underlying STOMP connection."""
        print("stomp_message_broker.sendMessage() - {0} - {1}".format(queue, message))
        self.stomp_daemon_connection.stompConn.send(queue, message)

    def brokerId(self):
        """Return the message-server client id of the underlying connection.

        Fix: the original definition omitted `self`, so any instance call
        raised TypeError and `self` inside the body was an undefined name.
        """
        return self.stomp_daemon_connection.msgSrvrClientId
| 47.846154
| 99
| 0.810289
| 77
| 622
| 6.311688
| 0.506494
| 0.135802
| 0.259259
| 0.257202
| 0.205761
| 0.205761
| 0.205761
| 0.205761
| 0
| 0
| 0
| 0.003623
| 0.11254
| 622
| 13
| 100
| 47.846154
| 0.876812
| 0.360129
| 0
| 0
| 0
| 0
| 0.116456
| 0.086076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.125
| 0.625
| 0.125
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a27bc5278cabe6198d868c19ef4a2328bcbe7fdb
| 69
|
py
|
Python
|
packages/ipylintotype/src/ipylintotype/widgets/__init__.py
|
deathbeds/lintotype
|
4b83f784b56ef12a245c0ca92d48eb95a9b0f7da
|
[
"BSD-3-Clause"
] | 18
|
2019-03-03T21:38:51.000Z
|
2020-06-12T14:24:37.000Z
|
packages/ipylintotype/src/ipylintotype/widgets/__init__.py
|
deathbeds/lintotype
|
4b83f784b56ef12a245c0ca92d48eb95a9b0f7da
|
[
"BSD-3-Clause"
] | 7
|
2019-03-03T18:55:59.000Z
|
2019-03-13T03:34:17.000Z
|
packages/ipylintotype/src/ipylintotype/widgets/__init__.py
|
deathbeds/lintotype
|
4b83f784b56ef12a245c0ca92d48eb95a9b0f7da
|
[
"BSD-3-Clause"
] | 2
|
2019-04-24T16:05:02.000Z
|
2020-03-25T17:47:35.000Z
|
from .diagnoser_widget import show_diagnoser, show_formatter # noqa
| 34.5
| 68
| 0.84058
| 9
| 69
| 6.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 1
| 69
| 69
| 0.901639
| 0.057971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a2822cf81363e568637c38032d449f53a604c10f
| 19
|
py
|
Python
|
Lib/site-packages/stripe/version.py
|
2anirban/LSTM-Stock-Predictor
|
bcd3709ff88c8d1286df93163b30164c1d225652
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
Lib/site-packages/stripe/version.py
|
2anirban/LSTM-Stock-Predictor
|
bcd3709ff88c8d1286df93163b30164c1d225652
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
Lib/site-packages/stripe/version.py
|
2anirban/LSTM-Stock-Predictor
|
bcd3709ff88c8d1286df93163b30164c1d225652
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# Package version string (PEP 440) reported by this stripe client library.
VERSION = "2.21.0"
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a28f62e5f9185d3ed102939d41b540145dd0bc87
| 17
|
py
|
Python
|
Chapter 03/Chap03_Example3.21.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 03/Chap03_Example3.21.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 03/Chap03_Example3.21.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
# Parenthesized call form prints the same text under Python 2 and is the
# required syntax under Python 3 (the bare `print "..."` statement is a
# SyntaxError there).
print("I am done")
| 17
| 17
| 0.705882
| 4
| 17
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a29539de8a40fc30546682170894d22d729366a5
| 77
|
py
|
Python
|
libconfpacker/packagers/deb/__init__.py
|
confpack/confpacker
|
5e430922a735e4d625c59656e6ca06bdc5e91df8
|
[
"Apache-2.0"
] | null | null | null |
libconfpacker/packagers/deb/__init__.py
|
confpack/confpacker
|
5e430922a735e4d625c59656e6ca06bdc5e91df8
|
[
"Apache-2.0"
] | null | null | null |
libconfpacker/packagers/deb/__init__.py
|
confpack/confpacker
|
5e430922a735e4d625c59656e6ca06bdc5e91df8
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from .packager import DebianPackager
| 19.25
| 38
| 0.87013
| 9
| 77
| 6.888889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116883
| 77
| 3
| 39
| 25.666667
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a29bc55aa57b02a4e0c51038323cc9238f0e2045
| 64
|
py
|
Python
|
launchSrv.py
|
moonmagian/teeworlds_srv_smod
|
9271d367bbc58befe40306d44e1cd5c7c87644fa
|
[
"Zlib"
] | 4
|
2016-06-18T05:27:56.000Z
|
2017-05-05T05:30:51.000Z
|
launchSrv.py
|
moonmagian/teeworlds_srv_smod
|
9271d367bbc58befe40306d44e1cd5c7c87644fa
|
[
"Zlib"
] | 1
|
2016-06-18T05:28:16.000Z
|
2016-06-18T11:28:19.000Z
|
launchSrv.py
|
moonmagian/teeworlds_srv_smod
|
9271d367bbc58befe40306d44e1cd5c7c87644fa
|
[
"Zlib"
] | null | null | null |
import os
os.system('./bam/bam')
os.system('./teeworlds_srv_d')
| 16
| 30
| 0.703125
| 11
| 64
| 3.909091
| 0.636364
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 64
| 3
| 31
| 21.333333
| 0.716667
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a2dc7e37421c45d4dfc9cb703ef4611decdccaee
| 46
|
py
|
Python
|
src/cloudlight/utils/__init__.py
|
joigno/cloudlight
|
8a6510047abd97e0bf3a568322205beb56fa5260
|
[
"BSD-3-Clause"
] | 3
|
2020-08-21T00:18:50.000Z
|
2020-10-21T17:40:47.000Z
|
src/cloudlight/utils/__init__.py
|
joigno/cloudlight
|
8a6510047abd97e0bf3a568322205beb56fa5260
|
[
"BSD-3-Clause"
] | null | null | null |
src/cloudlight/utils/__init__.py
|
joigno/cloudlight
|
8a6510047abd97e0bf3a568322205beb56fa5260
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Created on Apr 8, 2010
@author: jose
'''
| 7.666667
| 22
| 0.586957
| 7
| 46
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 0.217391
| 46
| 5
| 23
| 9.2
| 0.611111
| 0.804348
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7b84440fcb5b9f5108150bd67080cacc4e281e9
| 2,571
|
py
|
Python
|
database/db_example.py
|
alenasf/advanced_topics_in_python
|
62840f386735daf7c53a96560f5567785299a770
|
[
"Apache-2.0"
] | null | null | null |
database/db_example.py
|
alenasf/advanced_topics_in_python
|
62840f386735daf7c53a96560f5567785299a770
|
[
"Apache-2.0"
] | null | null | null |
database/db_example.py
|
alenasf/advanced_topics_in_python
|
62840f386735daf7c53a96560f5567785299a770
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3 as db
#
# """Example_1: CREATE TABLE"""
#
#
# class Post:
# def __init__(self,title, body, author):
# self.title = title
# self.body = body
# self.author = author
#
#
#
# # create file and write data result in it
# connection = db.connect("my_database.db")
# cursor = connection.cursor()
#
# create_posts_table_string = '''
# CREATE TABLE Posts(
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# title text,
# body text,
# author text
# );
# '''
# title = input("enter post title: ")
# body = input("enter post body: ")
# author = input("enter post author: ")
# post = Post(title, body, author)
#
#
# cursor.execute(create_posts_table_string)
# insert_post_string = " insert into Posts(title, body, author) values(:title, :body, :author)";
# cursor.execute(insert_post_string,{'title':post.title, 'body':post.body, 'author':post.author})
#
#
# connection.commit()
# cursor.close()
# connection.close()
#
#
# """Example_2: INSERT"""
#
#
# def insert_posts(post,cursor):
# insert_post_string = " insert into Posts(title, body, author) values(:title, :body, :author)";
# cursor.execute(insert_post_string, {'title': post.title, 'body': post.body, 'author': post.author})
#
# connection = db.connect("my_database.db")
# cursor = connection.cursor()
#
# create_posts_table_string = '''
# CREATE TABLE Posts(
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# title text,
# body text,
# author text
# );
# '''
# title = input("enter post title: ")
# body = input("enter post body: ")
# author = input("enter post author: ")
# post = Post(title, body, author)
#
# cursor.execute(create_posts_table_string)
# insert_posts(post,cursor)
#
# connection.commit()
# cursor.close()
# connection.close()
"""Example_3: SELECT"""
def get_posts_by_author(author,cursor):
posts = cursor.execute("select * from Posts where author=:author", {'author':author})
return posts
connection = db.connect("my_database.db")
cursor = connection.cursor()
create_posts_table_string = '''
CREATE TABLE Posts(
id INTEGER PRIMARY KEY AUTOINCREMENT,
title text,
body text,
author text
);
'''
title = input("enter post title: ")
body = input("enter post body: ")
author = input("enter post author: ")
post = Post(title, body, author)
cursor.execute(create_posts_table_string)
insert_posts(post,cursor)
connection.commit()
posts = get_posts_by_author("me", cursor)
print(post.fetchall())
cursor.close()
connection.close()
| 24.961165
| 105
| 0.651886
| 313
| 2,571
| 5.210863
| 0.175719
| 0.071735
| 0.077253
| 0.080932
| 0.77989
| 0.77989
| 0.77989
| 0.735745
| 0.735745
| 0.735745
| 0
| 0.001935
| 0.196033
| 2,571
| 103
| 106
| 24.961165
| 0.787131
| 0.642552
| 0
| 0
| 0
| 0
| 0.317191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7c617e8a88a6b120100da4efe5387bb40f8a051
| 76
|
py
|
Python
|
module2.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
module2.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
module2.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
import singletone
print("아래는 모듈2에서 출력 합니다.")
print(singletone.only_one_var)
| 19
| 30
| 0.802632
| 12
| 76
| 4.916667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.092105
| 76
| 3
| 31
| 25.333333
| 0.84058
| 0
| 0
| 0
| 0
| 0
| 0.223684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
a7d389b95bdfde022f88cfa0f4d0c595b40e377c
| 206
|
py
|
Python
|
vedacls/utils/__init__.py
|
ChaseMonsterAway/vedacls
|
91657f688dcaf3f9f4c58eb40a8f5c8f34a4bd73
|
[
"Apache-2.0"
] | 26
|
2020-05-25T02:23:25.000Z
|
2021-09-24T01:50:26.000Z
|
vedacls/utils/__init__.py
|
ChaseMonsterAway/vedacls
|
91657f688dcaf3f9f4c58eb40a8f5c8f34a4bd73
|
[
"Apache-2.0"
] | null | null | null |
vedacls/utils/__init__.py
|
ChaseMonsterAway/vedacls
|
91657f688dcaf3f9f4c58eb40a8f5c8f34a4bd73
|
[
"Apache-2.0"
] | 11
|
2020-06-18T08:22:42.000Z
|
2021-09-23T01:47:58.000Z
|
from .checkpoint import load_checkpoint, save_checkpoint, weights_to_cpu
from .metrics import AverageMeter, ProgressMeter, accuracy
from .registry import Registry, build_from_cfg
from .config import Config
| 41.2
| 72
| 0.854369
| 27
| 206
| 6.296296
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101942
| 206
| 4
| 73
| 51.5
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac1a183d96229caf7c0e1207799299cb9606de73
| 240
|
py
|
Python
|
lib/models/cell_infers/__init__.py
|
rainwangphy/AutoDL-Projects
|
1a40948255ac3c16ee529d94144a39bf26e89bfa
|
[
"MIT"
] | 817
|
2020-01-15T00:23:41.000Z
|
2022-03-31T14:52:03.000Z
|
lib/models/cell_infers/__init__.py
|
rainwangphy/AutoDL-Projects
|
1a40948255ac3c16ee529d94144a39bf26e89bfa
|
[
"MIT"
] | 77
|
2020-01-14T14:02:45.000Z
|
2022-03-25T07:06:02.000Z
|
lib/models/cell_infers/__init__.py
|
rainwangphy/AutoDL-Projects
|
1a40948255ac3c16ee529d94144a39bf26e89bfa
|
[
"MIT"
] | 176
|
2020-01-15T10:39:41.000Z
|
2022-03-31T04:24:53.000Z
|
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
#####################################################
from .tiny_network import TinyNetwork
from .nasnet_cifar import NASNetonCIFAR
| 40
| 53
| 0.429167
| 20
| 240
| 5.05
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027149
| 0.079167
| 240
| 5
| 54
| 48
| 0.429864
| 0.204167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ac1bee32808a2b00e86f62ca67e2d2287d163606
| 161
|
py
|
Python
|
epistasis/models/linear/__init__.py
|
lperezmo/epistasis
|
4f751d9e2d9ca632a7b688cf32bd950ad7c2a754
|
[
"Unlicense"
] | 21
|
2016-08-31T15:14:55.000Z
|
2021-11-27T14:42:35.000Z
|
epistasis/models/linear/__init__.py
|
lperezmo/epistasis
|
4f751d9e2d9ca632a7b688cf32bd950ad7c2a754
|
[
"Unlicense"
] | 14
|
2016-11-30T18:39:00.000Z
|
2020-04-07T23:48:49.000Z
|
epistasis/models/linear/__init__.py
|
lperezmo/epistasis
|
4f751d9e2d9ca632a7b688cf32bd950ad7c2a754
|
[
"Unlicense"
] | 8
|
2016-08-30T00:30:14.000Z
|
2020-04-02T01:03:19.000Z
|
from .ordinary import EpistasisLinearRegression
from .lasso import EpistasisLasso
from .ridge import EpistasisRidge
from .elastic_net import EpistasisElasticNet
| 32.2
| 47
| 0.875776
| 17
| 161
| 8.235294
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099379
| 161
| 4
| 48
| 40.25
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac3095eec012a04dc42a006edefc93127bc945af
| 10,756
|
py
|
Python
|
chapter_2_collection/pyAudioAnalysis3/nlx-multi_train.py
|
fancyerii/voicebook
|
def82da8577086d0361643a05fec2463006533a9
|
[
"Apache-2.0"
] | 1
|
2020-03-05T01:19:17.000Z
|
2020-03-05T01:19:17.000Z
|
chapter_2_collection/pyAudioAnalysis3/nlx-multi_train.py
|
fancyerii/voicebook
|
def82da8577086d0361643a05fec2463006533a9
|
[
"Apache-2.0"
] | null | null | null |
chapter_2_collection/pyAudioAnalysis3/nlx-multi_train.py
|
fancyerii/voicebook
|
def82da8577086d0361643a05fec2463006533a9
|
[
"Apache-2.0"
] | null | null | null |
import os, getpass, time, sys
import pyautogui
#check version of python to train model in models folder
g=sys.version
g=g[0]
if g=='2':
library='pyaudioanalysis'
os.chdir('/Users/'+getpass.getuser()+'/'+library)
import audioTrainTest as aT
#now get the folder names that you want to classify
modelname=raw_input('what is the name of your model?')
classnum=raw_input('how many classes are you training? (note only supports N=2 and N=5 classes)')
a=0
folderlist=list()
while a != int(classnum):
folderlist.append(raw_input('what is the folder name for class %s?'%(str(a+1))))
a=a+1
elif g=='3':
library='pyaudioanalysis3'
os.chdir('/Users/'+getpass.getuser()+'/'+library)
import audioTrainTest as aT
#now get the folder names that you want to classify
modelname=input('what is the name of your model?')
classnum=input('how many classes are you training? (note only supports N=2 and N=5 classes)')
a=0
folderlist=list()
while a != int(classnum):
folderlist.append(input('what is the folder name for class %s?'%(str(a+1))))
a=a+1
#change directory so images get saved there
try:
os.chdir('/Users/'+getpass.getuser()+'/'+library+'/models/')
except:
os.mkdir('/Users/'+getpass.getuser()+'/'+library+'/models/')
os.chdir('/Users/'+getpass.getuser()+'/'+library+'/models/')
#now make the models around the length of the directory
try:
if len(folderlist)==2:
#make folders
folder1='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[0]
folder2='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[1]
print('training SVM')
aT.featureAndTrain([folder1,folder2], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", os.getcwd()+'/'+modelname+"_svm2Classes", True)
time.sleep(3)
im = pyautogui.screenshot(modelname+'_svm2Classes.png')
print('training knn')
aT.featureAndTrain([folder1,folder2], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", os.getcwd()+'/'+modelname+"_knn2Classes", True)
time.sleep(3)
im = pyautogui.screenshot(modelname+'_knn2Classes.png')
print('training extratrees')
aT.featureAndTrain([folder1,folder2], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", os.getcwd()+'/'+modelname+"_et2Classes", True)
time.sleep(3)
im = pyautogui.screenshot(modelname+'_et2Classes.png')
print('training gradientbost')
aT.featureAndTrain([folder1,folder2], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", os.getcwd()+'/'+modelname+"_gb2Classes", True)
time.sleep(3)
im = pyautogui.screenshot(modelname+'_gb2Classes.png')
print('training random forest')
aT.featureAndTrain([folder1,folder2], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", os.getcwd()+'/'+modelname+"_rf2Classes", True)
time.sleep(3)
im = pyautogui.screenshot(modelname+'_rf2Classes.png')
#now manually select the most accurate model from screenshots (can automate this with tesseler)
elif len(folderlist)==3:
folder1='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[0]
folder2='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[1]
folder3='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[2]
print('training SVM')
aT.featureAndTrain([folder1,folder2,folder3], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", modelname+"_svm3Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_svm3Classes.png")
print('training KNN')
aT.featureAndTrain([folder1,folder2,folder3], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", modelname+"_knn3Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_knn3Classes.png")
print('training extratrees')
aT.featureAndTrain([folder1,folder2,folder3], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", modelname+"_et3Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_et3Classes.png")
print('training gradientboost')
aT.featureAndTrain([folder1,folder2,folder3], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", modelname+"_gb3Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_gb3Classes.png")
print('training random forest')
aT.featureAndTrain([folder1,folder2,folder3], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", modelname+"_rf3Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_rf3Classes.png")
elif len(folderlist)==4:
folder1='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[0]
folder2='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[1]
folder3='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[2]
folder4='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[3]
print('training SVM')
aT.featureAndTrain([folder1,folder2,folder3,folder4], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", modelname+"_svm4Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_svm4Classes.png")
print('training KNN')
aT.featureAndTrain([folder1,folder2,folder3,folder4], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", modelname+"_knn4Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_knn4Classes.png")
print('training extratrees')
aT.featureAndTrain([folder1,folder2,folder3,folder4], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", modelname+"_et4Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_et4Classes.png")
print('training gradientboost')
aT.featureAndTrain([folder1,folder2,folder3,folder4], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", modelname+"_gb4Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_gb4Classes.png")
print('training random forest')
aT.featureAndTrain([folder1,folder2,folder3,folder4], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", modelname+"_rf4Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_rf4Classes.png")
elif len(folderlist)==5:
folder1='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[0]
folder2='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[1]
folder3='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[2]
folder4='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[3]
folder5='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[4]
print('training SVM')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", modelname+"_svm5Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_svm5Classes.png")
print('training KNN')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", modelname+"_knn5Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_knn5Classes.png")
print('training extratrees')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", modelname+"_et5Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_et5Classes.png")
print('training gradientboost')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", modelname+"_gb5Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_gb5Classes.png")
print('training random forest')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", modelname+"_rf5Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_rf5Classes.png")
elif len(folderlist)==6:
folder1='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[0]
folder2='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[1]
folder3='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[2]
folder4='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[3]
folder5='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[4]
folder6='/Users/'+getpass.getuser()+'/'+library+'/models/'+folderlist[5]
print('training SVM')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5,folder6], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "svm", modelname+"_svm6Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_svm6Classes.png")
print('training KNN')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5,folder6], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "knn", modelname+"_knn6Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_knn6Classes.png")
print('training extratrees')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5,folder6], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "extratrees", modelname+"_et6Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_et6Classes.png")
print('training gradientboost')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5,folder6], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "gradientboosting", modelname+"_gb6Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_gb6Classes.png")
print('training random forest')
aT.featureAndTrain([folder1,folder2,folder3,folder4,folder5,folder6], 1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, "randomforest", modelname+"_rf6Classes")
time.sleep(3)
im = pyautogui.screenshot(modelname+"_rf6Classes.png")
else:
print('Sorry, cannot train 7 or more classes. Please try again with fewer classes')
except:
print('error, folders do not exist or files or improperly formatted')
| 44.630705
| 170
| 0.64894
| 1,187
| 10,756
| 5.83572
| 0.136479
| 0.014436
| 0.068572
| 0.093836
| 0.824022
| 0.818536
| 0.812473
| 0.683413
| 0.642847
| 0.608777
| 0
| 0.038648
| 0.194124
| 10,756
| 240
| 171
| 44.816667
| 0.760498
| 0.033563
| 0
| 0.52795
| 0
| 0
| 0.20851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.161491
| 0.024845
| 0
| 0.024845
| 0.167702
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ac31f37979fe54ac2b0cc9a01fcee29422718eb4
| 89
|
py
|
Python
|
murmeltier/utils/trimmed_dict.py
|
malyvsen/evo-ai
|
3f9c9bd01b5c212b26a13ca0c230ee3df42a9612
|
[
"MIT"
] | 3
|
2018-06-23T09:45:49.000Z
|
2018-11-27T23:39:46.000Z
|
murmeltier/utils/trimmed_dict.py
|
malyvsen/evo-ai
|
3f9c9bd01b5c212b26a13ca0c230ee3df42a9612
|
[
"MIT"
] | null | null | null |
murmeltier/utils/trimmed_dict.py
|
malyvsen/evo-ai
|
3f9c9bd01b5c212b26a13ca0c230ee3df42a9612
|
[
"MIT"
] | null | null | null |
def trimmed_dict(dict, keys):
return {key: dict[key] for key in dict if key in keys}
| 29.666667
| 58
| 0.696629
| 17
| 89
| 3.588235
| 0.529412
| 0.163934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202247
| 89
| 2
| 59
| 44.5
| 0.859155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ac73fbfb909031e8f59586d3ce6410da6fa41fba
| 270
|
py
|
Python
|
vision_stuff/__init__.py
|
streanger/vision_stuff
|
431bcee18477237143b2dc2bc1da0c9ed7debe10
|
[
"MIT"
] | null | null | null |
vision_stuff/__init__.py
|
streanger/vision_stuff
|
431bcee18477237143b2dc2bc1da0c9ed7debe10
|
[
"MIT"
] | null | null | null |
vision_stuff/__init__.py
|
streanger/vision_stuff
|
431bcee18477237143b2dc2bc1da0c9ed7debe10
|
[
"MIT"
] | null | null | null |
from .vision_stuff import script_path, show_image, blank_image, save_img, shrink_img, shrink_img_dir, shrink_img_cli, shrink_dir_cli, shrink_example, roll_image, convert_rotation, roll_layers, roll_layers_example, gradient_image, gradient_example, margin, margin_example
| 270
| 270
| 0.866667
| 41
| 270
| 5.195122
| 0.512195
| 0.126761
| 0.112676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07037
| 270
| 1
| 270
| 270
| 0.848606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac7db0f4a87fe828b4bf12377b7af297dd8f4dce
| 1,957
|
py
|
Python
|
web/transiq/restapi/migrations/0005_auto_20180802_1513.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
web/transiq/restapi/migrations/0005_auto_20180802_1513.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | 14
|
2020-06-05T23:06:45.000Z
|
2022-03-12T00:00:18.000Z
|
web/transiq/restapi/migrations/0005_auto_20180802_1513.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-08-02 15:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restapi', '0004_auto_20180727_1603'),
]
operations = [
migrations.RenameField(
model_name='employeerolesbookingstatusmapping',
old_name='position',
new_name='assignment_status',
),
migrations.RenameField(
model_name='historicalemployeerolesbookingstatusmapping',
old_name='position',
new_name='assignment_status',
),
migrations.AlterField(
model_name='bookingstatuses',
name='status',
field=models.CharField(choices=[('confirmed', 'Confirmed'), ('loaded', 'Loaded'), ('lr_generated', 'Lr_Generated'), ('advance_paid', 'Advance_Paid'), ('reconciled', 'Reconciled'), ('unloaded', 'Unloaded'), ('pod_uploaded', 'PoD_Uploaded'), ('pod_verified', 'PoD_Verified'), ('invoice_raised', 'Invoice_Raised'), ('invoice_confirmed', 'Invoice Confirmed'), ('balance_paid', 'Balance_Paid'), ('party_invoice_sent', 'Party_Invoice_Sent'), ('inward_followup', 'Inward_Followup'), ('complete', 'Complete')], default='confirmed', max_length=15, null=True),
),
migrations.AlterField(
model_name='historicalbookingstatuses',
name='status',
field=models.CharField(choices=[('confirmed', 'Confirmed'), ('loaded', 'Loaded'), ('lr_generated', 'Lr_Generated'), ('advance_paid', 'Advance_Paid'), ('reconciled', 'Reconciled'), ('unloaded', 'Unloaded'), ('pod_uploaded', 'PoD_Uploaded'), ('pod_verified', 'PoD_Verified'), ('invoice_raised', 'Invoice_Raised'), ('invoice_confirmed', 'Invoice Confirmed'), ('balance_paid', 'Balance_Paid'), ('party_invoice_sent', 'Party_Invoice_Sent'), ('inward_followup', 'Inward_Followup'), ('complete', 'Complete')], default='confirmed', max_length=15, null=True),
),
]
| 57.558824
| 562
| 0.652529
| 185
| 1,957
| 6.616216
| 0.351351
| 0.029412
| 0.045752
| 0.04902
| 0.702614
| 0.702614
| 0.702614
| 0.702614
| 0.624183
| 0.624183
| 0
| 0.021699
| 0.175779
| 1,957
| 33
| 563
| 59.30303
| 0.737136
| 0.022994
| 0
| 0.592593
| 1
| 0
| 0.463874
| 0.064921
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac8b14d754de5edce90b21df03d1292a9144367a
| 3,787
|
py
|
Python
|
tests/InitTask_test.py
|
bastienboutonnet/status-villain
|
75f667aa8d3e165434565017d11fbf18729f30ca
|
[
"MIT"
] | null | null | null |
tests/InitTask_test.py
|
bastienboutonnet/status-villain
|
75f667aa8d3e165434565017d11fbf18729f30ca
|
[
"MIT"
] | 7
|
2021-08-20T11:25:09.000Z
|
2021-09-30T20:58:01.000Z
|
tests/InitTask_test.py
|
bastienboutonnet/status-villain
|
75f667aa8d3e165434565017d11fbf18729f30ca
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
from status_villain.tasks.tasks import InitTask
TEST_DIR = Path(__file__).resolve().parent
class Question:
def __init__(self, return_value):
self._return_value = return_value
def ask(self):
return self._return_value
@pytest.mark.datafiles(TEST_DIR)
def test_create_profiles_dir(datafiles):
profiles_dir_path = datafiles
init_task = InitTask(profiles_dir_path=profiles_dir_path)
init_task.create_profiles_dir()
assert Path(profiles_dir_path).exists()
@pytest.mark.datafiles(TEST_DIR)
def test_create_profiles_dir_not_exist(datafiles):
profiles_dir_path = Path(datafiles).joinpath("nest")
init_task = InitTask(profiles_dir_path=profiles_dir_path)
init_task.create_profiles_dir()
assert Path(profiles_dir_path).exists()
@pytest.mark.datafiles(TEST_DIR)
def test_create_profiles_file(datafiles, mocker):
from status_villain.tasks.tasks import UserInfoInputModel
profiles_dir_path = datafiles
profiles_file_path = Path(profiles_dir_path).joinpath("credentials.yaml")
# mock the user input
mocker.patch("questionary.confirm", return_value=Question(True))
init_task = InitTask(profiles_dir_path=profiles_dir_path, profiles_file_path=profiles_file_path)
init_task.user_info = UserInfoInputModel(
first_name="Bastien",
last_name="Boutonnet",
password="hunter123",
username="bb",
email="bb@gmail.com",
)
init_task.create_profiles_file()
assert profiles_file_path.exists()
@pytest.mark.datafiles(TEST_DIR)
def test_persist_credentials(datafiles, mocker):
from status_villain.tasks.tasks import UserInfoInputModel
profiles_dir_path = datafiles # noqa: F811
profiles_file_path = Path(profiles_dir_path).joinpath("credentials.yaml")
# mock the user input
mocker.patch("questionary.confirm", return_value=Question(True))
init_task = InitTask(profiles_dir_path=profiles_dir_path, profiles_file_path=profiles_file_path)
init_task.user_info = UserInfoInputModel(
first_name="Bastien",
last_name="Boutonnet",
password="hunter123",
username="bb",
email="bb@gmail.com",
)
init_task.persist_credentials()
assert profiles_dir_path.exists()
assert profiles_file_path.exists()
@pytest.mark.datafiles(TEST_DIR)
def test_persist_credentials_no_user_info(datafiles, mocker):
profiles_dir_path = datafiles # noqa: F811
profiles_file_path = Path(profiles_dir_path).joinpath("credentials.yaml")
# mock the user input
mocker.patch("questionary.confirm", return_value=Question(True))
init_task = InitTask(profiles_dir_path=profiles_dir_path, profiles_file_path=profiles_file_path)
with pytest.raises(AttributeError, match="'InitTask' object has no attribute 'user_info'"):
init_task.persist_credentials()
@pytest.mark.datafiles(TEST_DIR)
def test_run(datafiles, mocker, monkeypatch):
profiles_dir_path = datafiles
profiles_file_path = Path(profiles_dir_path).joinpath("credentials.yaml")
# mock the user input
mocker.patch("questionary.confirm", return_value=Question(True))
mocker.patch(
"questionary.prompt",
return_value=dict(
first_name="Bastien",
last_name="Boutonnet",
password="hunter123",
username="bb",
email="bb@gmail.com",
),
)
init_task = InitTask(profiles_dir_path=profiles_dir_path, profiles_file_path=profiles_file_path)
def create_user_mock(*args, **kwargs):
return None
monkeypatch.setattr("status_villain.tasks.tasks.create_user", create_user_mock)
init_task.run()
assert profiles_dir_path.exists()
assert profiles_file_path.exists()
| 30.540323
| 100
| 0.736995
| 474
| 3,787
| 5.535865
| 0.162447
| 0.125762
| 0.148628
| 0.08689
| 0.772104
| 0.772104
| 0.759527
| 0.746951
| 0.746951
| 0.746951
| 0
| 0.004757
| 0.167415
| 3,787
| 123
| 101
| 30.788618
| 0.827466
| 0.02667
| 0
| 0.654762
| 0
| 0
| 0.098668
| 0.010329
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.107143
| false
| 0.035714
| 0.059524
| 0.02381
| 0.202381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ce028281568f1c0430866b87f1c866f1eadc4177
| 1,665
|
py
|
Python
|
ndn/parsimonious.py
|
ErinCall/ParserDemos
|
c61ac2b1c2c43e6adc81eba077e4e33484733e31
|
[
"MIT"
] | 1
|
2015-04-09T00:57:25.000Z
|
2015-04-09T00:57:25.000Z
|
ndn/parsimonious.py
|
AndrewLorente/ParserDemos
|
c61ac2b1c2c43e6adc81eba077e4e33484733e31
|
[
"MIT"
] | null | null | null |
ndn/parsimonious.py
|
AndrewLorente/ParserDemos
|
c61ac2b1c2c43e6adc81eba077e4e33484733e31
|
[
"MIT"
] | 1
|
2021-06-19T06:01:52.000Z
|
2021-06-19T06:01:52.000Z
|
from __future__ import absolute_import
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
from random import randint
grammar = Grammar("""
expression = operation / element
operation = ws element ws operator ws expression ws
element = parenthetical / number
parenthetical = "(" ws expression ws ")"
ws = ~"\s"*
operator = ~"[+\-/*d]"
number = "-"? ~"[0-9]+" ("." ~"[0-9]+")?
""")
class Calculator(NodeVisitor):
def generic_visit(self, node, visited_children):
pass
def visit_expression(self, node, visited_children):
return visited_children[0]
def visit_operation(self, node, visited_children):
#visited_children is [ws, number, ws, operator, ws, number, ws]
return visited_children[3](visited_children[1], visited_children[5])
def visit_element(self, node, visited_children):
return visited_children[0]
def visit_parenthetical(self, node, visited_children):
#visited_children is ['(', whitespace, some_expression, whitespace, ')']
return visited_children[2]
def visit_operator(self, node, visited_children):
def roll(num, size):
return sum(map(lambda _: randint(1, size), range(0, int(num))))
return {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x / y,
'd': roll,
}[node.text]
def visit_number(self, node, visited_children):
return float(node.text)
def calculate(text):
return Calculator().visit(grammar.parse(text))
| 32.647059
| 80
| 0.614414
| 193
| 1,665
| 5.150259
| 0.274611
| 0.226358
| 0.105634
| 0.161972
| 0.256539
| 0.227364
| 0.227364
| 0.146881
| 0.146881
| 0.146881
| 0
| 0.009709
| 0.257658
| 1,665
| 50
| 81
| 33.3
| 0.794498
| 0.07988
| 0
| 0.052632
| 0
| 0
| 0.199346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.236842
| false
| 0.026316
| 0.105263
| 0.184211
| 0.578947
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ce11d98eeec46393aec1093555f54b54f588f745
| 165
|
py
|
Python
|
survae/data/datasets/image/unsupervised_wrappers/__init__.py
|
alisiahkoohi/survae_flows
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
[
"MIT"
] | 262
|
2020-07-05T20:57:44.000Z
|
2022-03-28T02:24:43.000Z
|
survae/data/datasets/image/unsupervised_wrappers/__init__.py
|
alisiahkoohi/survae_flows
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
[
"MIT"
] | 17
|
2020-08-15T05:43:34.000Z
|
2022-01-31T12:24:21.000Z
|
survae/data/datasets/image/unsupervised_wrappers/__init__.py
|
alisiahkoohi/survae_flows
|
e1747b05524c7ab540a211ed360ab3e67bc3e96d
|
[
"MIT"
] | 35
|
2020-08-24T06:55:37.000Z
|
2022-02-11T05:17:58.000Z
|
from .cifar10 import UnsupervisedCIFAR10
from .mnist import UnsupervisedMNIST
from .fashion_mnist import UnsupervisedFashionMNIST
from .svhn import UnsupervisedSVHN
| 33
| 51
| 0.878788
| 17
| 165
| 8.470588
| 0.588235
| 0.152778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026846
| 0.09697
| 165
| 4
| 52
| 41.25
| 0.939597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce1a75f7cf9f1f9b51ba164206fe1846102d28e9
| 94
|
py
|
Python
|
bdn/provider/admin.py
|
OpenSourceUniversity/bdn
|
8e8d5b4d63ff4cb9bdf7c5f23d07aa3ad3dd0121
|
[
"MIT"
] | 1
|
2019-01-18T19:57:25.000Z
|
2019-01-18T19:57:25.000Z
|
bdn/provider/admin.py
|
OpenSourceUniversity/bdn
|
8e8d5b4d63ff4cb9bdf7c5f23d07aa3ad3dd0121
|
[
"MIT"
] | 3
|
2019-06-23T17:26:24.000Z
|
2022-02-11T03:40:54.000Z
|
bdn/provider/admin.py
|
OpenSourceUniversity/bdn
|
8e8d5b4d63ff4cb9bdf7c5f23d07aa3ad3dd0121
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Provider
admin.site.register(Provider)
| 15.666667
| 32
| 0.819149
| 13
| 94
| 5.923077
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 94
| 5
| 33
| 18.8
| 0.927711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce1c808293547bb1060bdeddde6c9dc88e797a1a
| 41
|
py
|
Python
|
securify/__init__.py
|
AlexandreH/securify2
|
2d2ba0e1c20cdda550120ecdc1a7164db9b90e3c
|
[
"Apache-2.0"
] | 258
|
2020-01-23T16:58:38.000Z
|
2022-03-31T17:29:25.000Z
|
securify/__init__.py
|
sirhashalot/securify2
|
6852707449577add14bafce8e304946b3490a977
|
[
"Apache-2.0"
] | 34
|
2020-01-30T06:11:58.000Z
|
2022-02-27T07:53:17.000Z
|
securify/__init__.py
|
sirhashalot/securify2
|
6852707449577add14bafce8e304946b3490a977
|
[
"Apache-2.0"
] | 66
|
2020-01-28T09:23:05.000Z
|
2022-03-22T09:01:43.000Z
|
class SecurifyError(Exception):
pass
| 13.666667
| 31
| 0.756098
| 4
| 41
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 32
| 20.5
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ce1e1d3c376e2db27311a2d685c1af6271041ddc
| 128
|
py
|
Python
|
blackjack/__init__.py
|
Jonxslays/Blackjack
|
916c2a00d0727cc2275fe885bcb067dd55d88f2c
|
[
"MIT"
] | null | null | null |
blackjack/__init__.py
|
Jonxslays/Blackjack
|
916c2a00d0727cc2275fe885bcb067dd55d88f2c
|
[
"MIT"
] | null | null | null |
blackjack/__init__.py
|
Jonxslays/Blackjack
|
916c2a00d0727cc2275fe885bcb067dd55d88f2c
|
[
"MIT"
] | null | null | null |
from . import models
from .models import *
from .game import Game
__all__: list[str] = ["Game"]
__all__.extend(models.__all__)
| 18.285714
| 30
| 0.734375
| 18
| 128
| 4.555556
| 0.444444
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 128
| 6
| 31
| 21.333333
| 0.745455
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce2e9369433af0ab344983d877a3cdb5fd14edf1
| 42
|
py
|
Python
|
cuticulus/__init__.py
|
ngngardner/cuticulus
|
592e799ec9ae09ee12b12565a638ff9e448fbc21
|
[
"MIT"
] | null | null | null |
cuticulus/__init__.py
|
ngngardner/cuticulus
|
592e799ec9ae09ee12b12565a638ff9e448fbc21
|
[
"MIT"
] | null | null | null |
cuticulus/__init__.py
|
ngngardner/cuticulus
|
592e799ec9ae09ee12b12565a638ff9e448fbc21
|
[
"MIT"
] | null | null | null |
"""Main module and exported functions."""
| 21
| 41
| 0.714286
| 5
| 42
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.810811
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cbfd9960191154e22a6a05c36caffc3e7b2bdefa
| 226
|
py
|
Python
|
notifications/views.py
|
Parimal7/kwikpic-assignment
|
c0a7bc1124f973c34058505e40b360a6b74c1536
|
[
"CC0-1.0"
] | null | null | null |
notifications/views.py
|
Parimal7/kwikpic-assignment
|
c0a7bc1124f973c34058505e40b360a6b74c1536
|
[
"CC0-1.0"
] | null | null | null |
notifications/views.py
|
Parimal7/kwikpic-assignment
|
c0a7bc1124f973c34058505e40b360a6b74c1536
|
[
"CC0-1.0"
] | null | null | null |
from django.shortcuts import render
from catalog.middleware.filter_ip_middleware import property_not_important
# Create your views here.
#@property_not_important
def index2(request):
return render(request, 'index2.html')
| 28.25
| 74
| 0.823009
| 30
| 226
| 6
| 0.7
| 0.122222
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.106195
| 226
| 7
| 75
| 32.285714
| 0.881188
| 0.20354
| 0
| 0
| 0
| 0
| 0.062147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
02013a1909a2172a03e7a1040aa766d34a580803
| 135
|
py
|
Python
|
keywords2vec/imports.py
|
dperezrada/keywords2vec
|
1c067dabafce8ad590fc6b1c255132bcb55f4415
|
[
"Apache-2.0"
] | 25
|
2019-04-19T06:47:05.000Z
|
2021-11-08T10:33:46.000Z
|
keywords2vec/imports.py
|
dperezrada/keywords2vec
|
1c067dabafce8ad590fc6b1c255132bcb55f4415
|
[
"Apache-2.0"
] | 3
|
2020-02-26T14:17:57.000Z
|
2021-09-28T00:56:08.000Z
|
keywords2vec/imports.py
|
dperezrada/keywords2vec
|
1c067dabafce8ad590fc6b1c255132bcb55f4415
|
[
"Apache-2.0"
] | 6
|
2019-05-05T11:48:54.000Z
|
2022-03-22T06:24:25.000Z
|
import gzip
import os
import re
import unidecode
import nltk
from stop_words import safe_get_stop_words
from annoy import AnnoyIndex
| 13.5
| 42
| 0.851852
| 22
| 135
| 5.045455
| 0.590909
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 135
| 9
| 43
| 15
| 0.965217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
020aace1f325d5185ed2199f1a07cb71aa0f17cc
| 45
|
py
|
Python
|
crashreport_stats/static/crashreport_stats/__init__.py
|
FairphoneMirrors/hiccup-server
|
8b80109740ea663d23ca46bb272c8fd95f873f1e
|
[
"Apache-2.0"
] | null | null | null |
crashreport_stats/static/crashreport_stats/__init__.py
|
FairphoneMirrors/hiccup-server
|
8b80109740ea663d23ca46bb272c8fd95f873f1e
|
[
"Apache-2.0"
] | 1
|
2019-10-21T18:00:57.000Z
|
2019-10-21T18:00:57.000Z
|
crashreport_stats/static/crashreport_stats/__init__.py
|
FairphoneMirrors/hiccup-server
|
8b80109740ea663d23ca46bb272c8fd95f873f1e
|
[
"Apache-2.0"
] | null | null | null |
"""Hiccup statistics pages statics files."""
| 22.5
| 44
| 0.733333
| 5
| 45
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.825
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0214f83be40b0a547902fa669033d20cf9195546
| 91
|
py
|
Python
|
Math/10953-A+B-6.py
|
homveloper/Algorithm
|
ae2e063d75a3ecc2537f97ede002450b45da4aa5
|
[
"Apache-2.0"
] | null | null | null |
Math/10953-A+B-6.py
|
homveloper/Algorithm
|
ae2e063d75a3ecc2537f97ede002450b45da4aa5
|
[
"Apache-2.0"
] | null | null | null |
Math/10953-A+B-6.py
|
homveloper/Algorithm
|
ae2e063d75a3ecc2537f97ede002450b45da4aa5
|
[
"Apache-2.0"
] | null | null | null |
print(*map(sum,[ list(map(int,input().split(','))) for i in range(int(input()))]),sep='\n')
| 91
| 91
| 0.593407
| 16
| 91
| 3.375
| 0.8125
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054945
| 91
| 1
| 91
| 91
| 0.627907
| 0
| 0
| 0
| 0
| 0
| 0.032609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0235d107230c1396cde127f304b3cc2e52475ffd
| 157
|
py
|
Python
|
dorna_ros/src/dorna_exceptions.py
|
beduffy/dorna_arm_ros
|
82d159db4722f7613260c96d22e8e3ac75178203
|
[
"MIT"
] | 13
|
2019-09-02T17:50:40.000Z
|
2021-12-04T17:56:48.000Z
|
dorna_ros/src/dorna_exceptions.py
|
beduffy/dorna_arm_ros
|
82d159db4722f7613260c96d22e8e3ac75178203
|
[
"MIT"
] | 6
|
2019-09-10T22:11:07.000Z
|
2021-08-19T13:01:29.000Z
|
dorna_ros/src/dorna_exceptions.py
|
beduffy/dorna_arm_ros
|
82d159db4722f7613260c96d22e8e3ac75178203
|
[
"MIT"
] | 2
|
2020-04-14T21:18:12.000Z
|
2020-09-20T14:11:04.000Z
|
#! /usr/bin/env python3
class ConnectionException(Exception):
pass
class HomingException(Exception):
pass
class PathException(Exception):
pass
| 15.7
| 37
| 0.745223
| 16
| 157
| 7.3125
| 0.625
| 0.333333
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.165605
| 157
| 10
| 38
| 15.7
| 0.885496
| 0.140127
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
026150342ac2363a273b797866dbfceddb78b82e
| 37
|
py
|
Python
|
dikicli/__main__.py
|
silenc3r/dikicli
|
9f0b10e5a01e480e98a14de2f25870822a8c2f8d
|
[
"MIT"
] | 2
|
2019-12-30T00:08:08.000Z
|
2021-02-06T18:02:49.000Z
|
dikicli/__main__.py
|
silenc3r/dikicli
|
9f0b10e5a01e480e98a14de2f25870822a8c2f8d
|
[
"MIT"
] | 5
|
2020-02-17T20:05:37.000Z
|
2021-02-03T19:45:59.000Z
|
dikicli/__main__.py
|
silenc3r/dikicli
|
9f0b10e5a01e480e98a14de2f25870822a8c2f8d
|
[
"MIT"
] | 1
|
2019-03-03T07:56:01.000Z
|
2019-03-03T07:56:01.000Z
|
from dikicli.cli import main
main()
| 9.25
| 28
| 0.756757
| 6
| 37
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 3
| 29
| 12.333333
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
026919f8f4f640216017f22db5c6099d96db0362
| 298
|
py
|
Python
|
crate/web/packages/templatetags/package_utils.py
|
vijay2312/crate.web
|
dbf078485675ecd568e33a170d31b068949ec9bf
|
[
"BSD-2-Clause"
] | 1
|
2021-06-23T18:14:30.000Z
|
2021-06-23T18:14:30.000Z
|
crate/web/packages/templatetags/package_utils.py
|
vijay2312/crate.web
|
dbf078485675ecd568e33a170d31b068949ec9bf
|
[
"BSD-2-Clause"
] | null | null | null |
crate/web/packages/templatetags/package_utils.py
|
vijay2312/crate.web
|
dbf078485675ecd568e33a170d31b068949ec9bf
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from django import template
register = template.Library()
@register.filter
def filename(value):
return os.path.basename(value)
@register.filter
def digest_type(digest):
return digest.split("$")[0]
@register.filter
def digest_value(digest):
return digest.split("$")[1]
| 14.190476
| 34
| 0.721477
| 39
| 298
| 5.461538
| 0.487179
| 0.197183
| 0.239437
| 0.215962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.147651
| 298
| 20
| 35
| 14.9
| 0.830709
| 0
| 0
| 0.25
| 0
| 0
| 0.006711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.166667
| 0.25
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
65f8f9430d3c16d307e22358aaf089d5b23ee724
| 89
|
py
|
Python
|
kong_pdk/exception.py
|
danielpoonwj/kong-python-pdk
|
31d2b458555f8dc1498dd601c622bd5935dd79eb
|
[
"Apache-2.0"
] | null | null | null |
kong_pdk/exception.py
|
danielpoonwj/kong-python-pdk
|
31d2b458555f8dc1498dd601c622bd5935dd79eb
|
[
"Apache-2.0"
] | null | null | null |
kong_pdk/exception.py
|
danielpoonwj/kong-python-pdk
|
31d2b458555f8dc1498dd601c622bd5935dd79eb
|
[
"Apache-2.0"
] | null | null | null |
class PluginServerException(Exception):
pass
class PDKException(Exception):
pass
| 17.8
| 39
| 0.775281
| 8
| 89
| 8.625
| 0.625
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157303
| 89
| 5
| 40
| 17.8
| 0.92
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
65f9584329ba101d61453d94d41212eec8293299
| 146
|
py
|
Python
|
khmernltk/__init__.py
|
VietHoang1710/khmer_nltk
|
1e04dfc6e3aa107fa2f875c6feada6eb19aa38f5
|
[
"Apache-2.0"
] | 18
|
2021-04-08T07:12:32.000Z
|
2022-02-12T02:22:12.000Z
|
khmernltk/__init__.py
|
VietHoang1710/khmer_nltk
|
1e04dfc6e3aa107fa2f875c6feada6eb19aa38f5
|
[
"Apache-2.0"
] | null | null | null |
khmernltk/__init__.py
|
VietHoang1710/khmer_nltk
|
1e04dfc6e3aa107fa2f875c6feada6eb19aa38f5
|
[
"Apache-2.0"
] | 5
|
2021-04-07T03:53:24.000Z
|
2022-01-07T03:58:25.000Z
|
from khmernltk.pos_tag import pos_tag
from khmernltk.sentence_tokenize import sentence_tokenize
from khmernltk.word_tokenize import word_tokenize
| 36.5
| 57
| 0.89726
| 21
| 146
| 5.952381
| 0.380952
| 0.312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082192
| 146
| 3
| 58
| 48.666667
| 0.932836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5a150b995e8f8054ae3a34b5bae2a2a5078dd590
| 27
|
py
|
Python
|
jd/version.py
|
lf1-io/jobdeploy
|
cbb0e3c4c6c331df90f77c8ff028a1e196b32091
|
[
"Apache-2.0"
] | null | null | null |
jd/version.py
|
lf1-io/jobdeploy
|
cbb0e3c4c6c331df90f77c8ff028a1e196b32091
|
[
"Apache-2.0"
] | null | null | null |
jd/version.py
|
lf1-io/jobdeploy
|
cbb0e3c4c6c331df90f77c8ff028a1e196b32091
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.0.1dev12"
| 13.5
| 26
| 0.703704
| 4
| 27
| 3.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 0.111111
| 27
| 1
| 27
| 27
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5a37356a520e51cc1f3db6ce27ee3f65c682c2fa
| 64
|
py
|
Python
|
jiractl_shared_functions/__init__.py
|
gcarrarom/fancy-jira
|
c8e9d4dea328ffe86da6de54f67da0e2bde996f5
|
[
"MIT"
] | 3
|
2021-03-31T21:20:49.000Z
|
2021-11-13T11:14:38.000Z
|
jiractl_shared_functions/__init__.py
|
gcarrarom/fancy-jira
|
c8e9d4dea328ffe86da6de54f67da0e2bde996f5
|
[
"MIT"
] | null | null | null |
jiractl_shared_functions/__init__.py
|
gcarrarom/fancy-jira
|
c8e9d4dea328ffe86da6de54f67da0e2bde996f5
|
[
"MIT"
] | null | null | null |
from .configuration_functions import *
from .exceptions import *
| 32
| 38
| 0.828125
| 7
| 64
| 7.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 2
| 39
| 32
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ca486c30e63a2cce4ac971fc25e05dae2be2af1
| 746
|
py
|
Python
|
sdk/python/pulumi_google_native/cloudtasks/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/cloudtasks/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/cloudtasks/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_google_native.cloudtasks.v2 as __v2
v2 = __v2
import pulumi_google_native.cloudtasks.v2beta2 as __v2beta2
v2beta2 = __v2beta2
import pulumi_google_native.cloudtasks.v2beta3 as __v2beta3
v2beta3 = __v2beta3
else:
v2 = _utilities.lazy_import('pulumi_google_native.cloudtasks.v2')
v2beta2 = _utilities.lazy_import('pulumi_google_native.cloudtasks.v2beta2')
v2beta3 = _utilities.lazy_import('pulumi_google_native.cloudtasks.v2beta3')
| 35.52381
| 80
| 0.765416
| 98
| 746
| 5.5
| 0.44898
| 0.133581
| 0.200371
| 0.267161
| 0.510204
| 0.510204
| 0.261596
| 0
| 0
| 0
| 0
| 0.049051
| 0.152815
| 746
| 20
| 81
| 37.3
| 0.803797
| 0.253351
| 0
| 0
| 1
| 0
| 0.203267
| 0.203267
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.615385
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ca9b153fab36513b5b1d4f7ab363101b1e40afd
| 24
|
py
|
Python
|
main/__init__.py
|
batpad/go-api
|
6c187396fddae9ebcb923540824c86c40f8254bb
|
[
"MIT"
] | null | null | null |
main/__init__.py
|
batpad/go-api
|
6c187396fddae9ebcb923540824c86c40f8254bb
|
[
"MIT"
] | 5
|
2020-06-06T00:54:03.000Z
|
2021-11-15T17:49:56.000Z
|
main/__init__.py
|
batpad/go-api
|
6c187396fddae9ebcb923540824c86c40f8254bb
|
[
"MIT"
] | null | null | null |
__version__ = '1.1.208'
| 12
| 23
| 0.666667
| 4
| 24
| 3
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 0.125
| 24
| 1
| 24
| 24
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0cae3b66585b81034498a35b5b15ae9a7b431d69
| 264
|
py
|
Python
|
utils/data/samplers/__init__.py
|
rs9899/Parsing-R-CNN
|
a0c9ed8850abe740eedf8bfc6e1577cc0aa3fc7b
|
[
"MIT"
] | 289
|
2018-10-25T09:42:57.000Z
|
2022-03-30T08:31:50.000Z
|
utils/data/samplers/__init__.py
|
qzane/Parsing-R-CNN
|
8c4d940dcd322bf7a8671f8b0faaabb3259bd384
|
[
"MIT"
] | 28
|
2019-01-07T02:39:49.000Z
|
2022-01-25T08:54:36.000Z
|
utils/data/samplers/__init__.py
|
qzane/Parsing-R-CNN
|
8c4d940dcd322bf7a8671f8b0faaabb3259bd384
|
[
"MIT"
] | 44
|
2018-12-20T07:36:46.000Z
|
2022-03-16T14:30:20.000Z
|
from .distributed import DistributedSampler
from .repeat_factor import RepeatFactorTrainingSampler
from .grouped_batch_sampler import GroupedBatchSampler
from .iteration_based_batch_sampler import IterationBasedBatchSampler
from .range_sampler import RangeSampler
| 44
| 69
| 0.905303
| 27
| 264
| 8.592593
| 0.592593
| 0.168103
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 264
| 5
| 70
| 52.8
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0cf1af18e64861232e0d5da675288f7757d48ba4
| 20
|
py
|
Python
|
src/main.py
|
RajeevMogili/Ltecomm
|
10e138367cbe353ed48e270fd22cf83103d53c7d
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
RajeevMogili/Ltecomm
|
10e138367cbe353ed48e270fd22cf83103d53c7d
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
RajeevMogili/Ltecomm
|
10e138367cbe353ed48e270fd22cf83103d53c7d
|
[
"Apache-2.0"
] | null | null | null |
#Code for feature1
| 6.666667
| 18
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.2
| 20
| 2
| 19
| 10
| 0.875
| 0.85
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0cfe12a7580439622e67eceb612a84cc5c2663bf
| 118
|
py
|
Python
|
rbac/ldap/__init__.py
|
shawnmckinney/py-fortress
|
ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf
|
[
"Apache-2.0"
] | 16
|
2018-03-19T02:19:01.000Z
|
2021-12-30T15:24:40.000Z
|
rbac/ldap/__init__.py
|
shawnmckinney/py-fortress
|
ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf
|
[
"Apache-2.0"
] | 1
|
2021-12-18T16:46:04.000Z
|
2021-12-18T16:46:04.000Z
|
rbac/ldap/__init__.py
|
shawnmckinney/py-fortress
|
ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf
|
[
"Apache-2.0"
] | 2
|
2018-03-14T21:48:43.000Z
|
2018-03-19T03:25:40.000Z
|
'''
@copyright: 2022 - Symas Corporation
'''
from .daoex import LdapException, NotFound, NotUnique, InvalidCredentials
| 29.5
| 73
| 0.779661
| 11
| 118
| 8.363636
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038095
| 0.110169
| 118
| 4
| 73
| 29.5
| 0.838095
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0b24f15f7d43810ffa547a769dec7aa6341cec46
| 30
|
py
|
Python
|
pcdet/version.py
|
zhangweichen2006/SRDAN_Open
|
47c1bd9d2369d8e486b18a7aea220af7324c9011
|
[
"Apache-2.0"
] | 8
|
2021-06-23T02:06:56.000Z
|
2022-03-18T08:34:32.000Z
|
pcdet/version.py
|
zhangweichen2006/SRDAN_Open
|
47c1bd9d2369d8e486b18a7aea220af7324c9011
|
[
"Apache-2.0"
] | 2
|
2021-07-17T11:19:14.000Z
|
2021-09-25T03:30:36.000Z
|
pcdet/version.py
|
zhangweichen2006/SRDAN_Open
|
47c1bd9d2369d8e486b18a7aea220af7324c9011
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.3.0+87621d0"
| 15
| 29
| 0.7
| 5
| 30
| 3.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.1
| 30
| 1
| 30
| 30
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b517eefefd4da8ed0d0b1d1d4a187dc5a761153
| 546
|
py
|
Python
|
src/sage/combinat/catalog_partitions.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | 2
|
2018-06-30T01:37:35.000Z
|
2018-06-30T01:37:39.000Z
|
src/sage/combinat/catalog_partitions.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/catalog_partitions.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
r"""
Enumerated sets of partitions, tableaux, ...
============================================
Quickref
--------
Catalog
-------
- :ref:`sage.combinat.partition`
- :ref:`sage.combinat.tableau`
- :ref:`sage.combinat.partition_tuple`
- :ref:`sage.combinat.tableau_tuple`
- :ref:`sage.combinat.skew_partition`
- :ref:`sage.combinat.skew_tableau`
- :ref:`sage.combinat.ribbon`
- :ref:`sage.combinat.ribbon_tableau`
- :ref:`sage.combinat.core`
- :ref:`sage.combinat.k_tableau`
- :ref:`sage.combinat.rsk`
- :ref:`sage.combinat.tableau_residues`
"""
| 22.75
| 44
| 0.644689
| 63
| 546
| 5.47619
| 0.31746
| 0.243478
| 0.521739
| 0.255072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07326
| 546
| 23
| 45
| 23.73913
| 0.681818
| 0.981685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b64bfee63bd8359b4f38dc5ba1fef14ad14e269
| 185
|
py
|
Python
|
kon/model/ctr_model/layer/__init__.py
|
TIXhjq/CTR_Function
|
bbb85327151257e40526ebd35e34fe4f1b0d9398
|
[
"Apache-2.0"
] | 12
|
2020-06-23T16:10:56.000Z
|
2021-02-20T09:57:08.000Z
|
kon/model/ctr_model/layer/__init__.py
|
TIXhjq/CTR_Function
|
bbb85327151257e40526ebd35e34fe4f1b0d9398
|
[
"Apache-2.0"
] | null | null | null |
kon/model/ctr_model/layer/__init__.py
|
TIXhjq/CTR_Function
|
bbb85327151257e40526ebd35e34fe4f1b0d9398
|
[
"Apache-2.0"
] | 5
|
2020-07-10T03:27:41.000Z
|
2021-02-23T06:21:17.000Z
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''=================================
@Author :tix_hjq
@Date :2020/5/29 下午4:01
@File :__init__.py.py
================================='''
| 26.428571
| 36
| 0.4
| 20
| 185
| 3.25
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065868
| 0.097297
| 185
| 7
| 37
| 26.428571
| 0.323353
| 0.951351
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b99a318e0773457760aaa9f366ba36fbff63f39
| 2,699
|
py
|
Python
|
tifa/apps/admin/page.py
|
twocucao/tifa
|
f703fd27f54000e7d51f06d2456d09cc79e0ab72
|
[
"MIT"
] | 71
|
2020-04-16T04:28:45.000Z
|
2022-03-31T22:45:11.000Z
|
tifa/apps/admin/page.py
|
twocucao/tifa
|
f703fd27f54000e7d51f06d2456d09cc79e0ab72
|
[
"MIT"
] | 6
|
2021-05-13T06:32:38.000Z
|
2022-03-04T01:18:34.000Z
|
tifa/apps/admin/page.py
|
twocucao/tifa
|
f703fd27f54000e7d51f06d2456d09cc79e0ab72
|
[
"MIT"
] | 12
|
2021-05-01T08:43:11.000Z
|
2022-03-29T00:58:54.000Z
|
"""
pageAttributeAssign(...): PageAttributeAssign
pageAttributeUnassign(...): PageAttributeUnassign
pageReorderAttributeValues(...): PageReorderAttributeValues
pageTypeReorderAttributes(...): PageTypeReorderAttributes
"""
from fastapi_utils.api_model import APIModel
from tifa.apps.admin.router import bp
from tifa.apps.admin.local import g
from tifa.models.page import Page
class TPageType(APIModel):
id: str
name: str
@bp.list("/page_types", out=TPageType, summary="PageType", tags=["PageType"])
async def get_page_types():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.item("/page_type", out=TPageType, summary="PageType", tags=["PageType"])
async def get_page_type():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page_type/create", out=TPageType, summary="PageType", tags=["PageType"])
async def page_type_create():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page_type/update", out=TPageType, summary="PageType", tags=["PageType"])
async def page_type_update():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page_type/delete", out=TPageType, summary="PageType", tags=["PageType"])
async def page_type_delete():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page_type/bulk_delete", out=TPageType, summary="PageType", tags=["PageType"])
async def page_type_bulk_delete():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
class TPage(APIModel):
id: str
name: str
@bp.list("/pages", out=TPage, summary="Page", tags=["Page"])
async def get_pages():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.item("/page", out=TPage, summary="Page", tags=["Page"])
async def get_page():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page/create", out=TPage, summary="Page", tags=["Page"])
async def page_create():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page/update", out=TPage, summary="Page", tags=["Page"])
async def page_update():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page/delete", out=TPage, summary="Page", tags=["Page"])
async def page_delete():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page/bulk_delete", out=TPage, summary="Page", tags=["Page"])
async def page_bulk_delete():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
@bp.op("/page/bulk_publish", out=TPage, summary="Page", tags=["Page"])
async def page_bulk_publish():
ins = await g.adal.first_or_404(Page)
return {"items": ins}
| 26.722772
| 86
| 0.686551
| 388
| 2,699
| 4.623711
| 0.134021
| 0.057971
| 0.065217
| 0.094203
| 0.77146
| 0.77146
| 0.77146
| 0.742475
| 0.742475
| 0.656633
| 0
| 0.01676
| 0.137829
| 2,699
| 100
| 87
| 26.99
| 0.75419
| 0.079289
| 0
| 0.483871
| 0
| 0
| 0.15866
| 0.008882
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.370968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b99cea25ce88f6bcecbcfb736a630ec9745e045
| 5,558
|
py
|
Python
|
tests/test_sat_utils/test_sat_ephemeris.py
|
amanchokshi/mwa-satellites
|
f9e8de353e7eddf28ed715c01d7d3fb5336f0f18
|
[
"MIT"
] | 1
|
2020-08-10T11:42:55.000Z
|
2020-08-10T11:42:55.000Z
|
tests/test_sat_utils/test_sat_ephemeris.py
|
amanchokshi/mwa-satellites
|
f9e8de353e7eddf28ed715c01d7d3fb5336f0f18
|
[
"MIT"
] | 9
|
2020-11-16T03:05:16.000Z
|
2020-11-20T23:49:09.000Z
|
tests/test_sat_utils/test_sat_ephemeris.py
|
amanchokshi/mwa-satellites
|
f9e8de353e7eddf28ed715c01d7d3fb5336f0f18
|
[
"MIT"
] | 1
|
2021-12-27T02:34:30.000Z
|
2021-12-27T02:34:30.000Z
|
import shutil
from os import path
from pathlib import Path
from embers.sat_utils.sat_ephemeris import (ephem_data, epoch_ranges,
epoch_time_array, load_tle,
sat_pass, sat_plot, save_ephem)
# Save the path to this directory
dirpath = path.dirname(__file__)
# Obtain path to directory with test_data
test_data = path.abspath(path.join(dirpath, "../data"))
def test_load_tle_sats():
tle_file = f"{test_data}/sat_utils/TLE/25986.txt"
sats, epochs = load_tle(tle_file)
sat_id = sats[0].model.satnum
assert sat_id == 25986
def test_load_tle_epochs():
tle_file = f"{test_data}/sat_utils/TLE/25986.txt"
sats, epochs = load_tle(tle_file)
assert epochs[0] == 2458738.5
def test_epoch_ranges_length():
    """epoch_ranges splits the TLE epochs into the expected number of intervals."""
    _, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    assert len(intervals) == 311
def test_epoch_time_array_index():
    """epoch_time_array echoes the requested epoch index back unchanged."""
    _, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    _, returned_index = epoch_time_array(intervals, index_epoch=0, cadence=10)
    assert returned_index == 0
def test_epoch_time_array_arr():
    """epoch_time_array returns an object whose type is named 'Time'."""
    _, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    assert type(times).__name__ == "Time"
def test_sat_pass_passes_1():
    """The first pass in epoch interval 0 begins at time index 417."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    result = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    assert result[0][0][0] == 417
def test_sat_pass_passes_2():
    """The first pass in epoch interval 1 begins at time index 0."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=1, cadence=10)
    result = sat_pass(satellites, times, 1, location=(-26.703319, 116.670815, 337.83))
    assert result[0][0][0] == 0
def test_sat_pass_alt_az():
    """Altitude and azimuth components of a pass share the same type."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    result = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    assert type(result[1]) == type(result[2])
def test_sat_pass_alt_err():
    """sat_pass yields None when the TLE (44387) produces no usable pass."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/44387.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    result = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    assert result is None
def test_ephem_data_time():
    """ephem_data interpolates the first pass onto 98 time samples."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    passes, alt, az = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    time_array, _, _ = ephem_data(times, passes[0], alt, az)
    assert time_array.shape[0] == 98
def test_ephem_data_alt():
    """The interpolated altitude track has one value per time sample (98)."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    passes, alt, az = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    _, sat_alt, _ = ephem_data(times, passes[0], alt, az)
    assert sat_alt.shape[0] == 98
def test_ephem_data_az():
    """The interpolated azimuth track has one value per time sample (98)."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    passes, alt, az = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    _, _, sat_az = ephem_data(times, passes[0], alt, az)
    assert sat_az.shape[0] == 98
def test_sat_plot():
    """sat_plot hands back the matplotlib.pyplot module for further use."""
    satellites, epoch_list = load_tle(f"{test_data}/sat_utils/TLE/25986.txt")
    intervals = epoch_ranges(epoch_list)
    times, _ = epoch_time_array(intervals, index_epoch=0, cadence=10)
    passes, alt, az = sat_pass(satellites, times, 0, location=(-26.703319, 116.670815, 337.83))
    _, sat_alt, sat_az = ephem_data(times, passes[0], alt, az)
    figure = sat_plot(25986, sat_alt, sat_az)
    assert figure.__name__ == "matplotlib.pyplot"
def test_save_ephem_empty_file():
    """save_ephem reports (rather than raises) when the TLE file is empty."""
    tle_dir = f"{test_data}/sat_utils/TLE"
    out_dir = f"{test_data}/sat_utils/ephem_tmp"
    message = save_ephem(12345, tle_dir, 10, (-26.703319, 116.670815, 337.83), 0.5, out_dir)
    assert message == f"File {tle_dir}/12345 is empty, skipping"
def test_save_ephem_plot():
    """A successful save_ephem run writes the ephemeris plot PNG; clean up after."""
    out_dir = f"{test_data}/sat_utils/ephem_tmp"
    save_ephem(44387, f"{test_data}/sat_utils/TLE", 10, (-26.703319, 116.670815, 337.83), 0.5, out_dir)
    png = Path(f"{out_dir}/ephem_plots/44387.png")
    assert png.is_file() is True
    if png.is_file() is True:
        shutil.rmtree(out_dir)
| 35.177215
| 81
| 0.686218
| 907
| 5,558
| 3.883131
| 0.098126
| 0.05565
| 0.062465
| 0.090857
| 0.78904
| 0.737933
| 0.737933
| 0.710108
| 0.694492
| 0.694492
| 0
| 0.089017
| 0.187478
| 5,558
| 157
| 82
| 35.401274
| 0.690877
| 0.012774
| 0
| 0.57377
| 0
| 0
| 0.133844
| 0.124362
| 0
| 0
| 0
| 0
| 0.122951
| 1
| 0.122951
| false
| 0.172131
| 0.032787
| 0
| 0.155738
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0ba007b3ce37d21c974b775ba900a08156986d6a
| 158
|
py
|
Python
|
python/hardway/ex2.py
|
petervdb/eLearning
|
2928450f6429588fb2081f5686f9fa9f20529852
|
[
"Apache-2.0"
] | null | null | null |
python/hardway/ex2.py
|
petervdb/eLearning
|
2928450f6429588fb2081f5686f9fa9f20529852
|
[
"Apache-2.0"
] | null | null | null |
python/hardway/ex2.py
|
petervdb/eLearning
|
2928450f6429588fb2081f5686f9fa9f20529852
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# A comment, this is so you can read your program later
# Two greeting lines; output is identical to the original (semicolon and the
# stray space before the call were cosmetic only).
print("Blablabla ....")
print("Not exactly done what is mentioned in the book")
| 22.571429
| 56
| 0.702532
| 26
| 158
| 4.269231
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170886
| 158
| 6
| 57
| 26.333333
| 0.847328
| 0.443038
| 0
| 0
| 0
| 0
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e7e77bdaea45f50b13d5159eb2c020a2bf0a0b88
| 93
|
py
|
Python
|
test.py
|
LucasMolander/WoW-DPS-Excel-To-Python
|
01a02cbe004661fa529892cad17e0aea2c1fade2
|
[
"MIT"
] | null | null | null |
test.py
|
LucasMolander/WoW-DPS-Excel-To-Python
|
01a02cbe004661fa529892cad17e0aea2c1fade2
|
[
"MIT"
] | null | null | null |
test.py
|
LucasMolander/WoW-DPS-Excel-To-Python
|
01a02cbe004661fa529892cad17e0aea2c1fade2
|
[
"MIT"
] | null | null | null |
#
# For rapid tests of new stuff.
#
import formulas
# Do something with formulas...
| 11.625
| 32
| 0.645161
| 12
| 93
| 5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268817
| 93
| 7
| 33
| 13.285714
| 0.882353
| 0.634409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f032333b1ad99b9efb1d0a171b2ed77d4d0fe7fb
| 214
|
py
|
Python
|
dataset/classCount.py
|
a7i7/smriti
|
f3fe62f7a7b16fc5cb48a9df54a18143c9f17c30
|
[
"MIT"
] | null | null | null |
dataset/classCount.py
|
a7i7/smriti
|
f3fe62f7a7b16fc5cb48a9df54a18143c9f17c30
|
[
"MIT"
] | null | null | null |
dataset/classCount.py
|
a7i7/smriti
|
f3fe62f7a7b16fc5cb48a9df54a18143c9f17c30
|
[
"MIT"
] | null | null | null |
from math import log,ceil
# For each candidate class count, print a pipe-delimited row:
# |numClasses|ceil(2^132 ^ (1/n))|ceil(2^264 ^ (1/n))|
# (per-class alphabet size needed to encode 132-bit and 264-bit values).
for numClasses in range(10, 50):
    print(f"|{numClasses}|{ceil((2**132)**(1.0/numClasses))}|{ceil((2**264)**(1.0/numClasses))}|")
| 23.777778
| 48
| 0.616822
| 33
| 214
| 4
| 0.545455
| 0.242424
| 0.409091
| 0.333333
| 0.515152
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082051
| 0.088785
| 214
| 8
| 49
| 26.75
| 0.594872
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.714286
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f050e8ed69b6f50cd94f12b5734d162b2c7a9964
| 57
|
py
|
Python
|
tests/indexing/test_filename_parser_helpers.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 3
|
2021-03-03T21:02:11.000Z
|
2021-05-14T09:24:40.000Z
|
tests/indexing/test_filename_parser_helpers.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 8
|
2021-06-25T22:54:53.000Z
|
2021-08-09T10:07:30.000Z
|
tests/indexing/test_filename_parser_helpers.py
|
MyPyDavid/raman_fitting
|
a827ab578ae801e185384159f145ae4dfad39549
|
[
"MIT"
] | 2
|
2021-07-08T09:49:49.000Z
|
2022-03-19T14:43:01.000Z
|
"""
Created on Sun Aug 8 19:27:44 2021
@author: DW
"""
| 9.5
| 35
| 0.596491
| 11
| 57
| 3.090909
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.22807
| 57
| 5
| 36
| 11.4
| 0.522727
| 0.842105
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b2cc219b43ea8be8fb6ea77e410c0e8bd968b639
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/multidict/_multidict_base.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/multidict/_multidict_base.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/multidict/_multidict_base.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/5e/e8/24/13bf1f5c19adcc3760d988bd4eb1210dec662feeaa25b1481b4be22cc8
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.364583
| 0
| 96
| 1
| 96
| 96
| 0.53125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
65041f5c25264368483f6121651ae65bfe2f3a84
| 134
|
py
|
Python
|
jetpack/make_hash.py
|
andymckay/amo-validator
|
d13e3644eb657e56666ee40d91a9c67382cfa725
|
[
"BSD-3-Clause"
] | 1
|
2015-07-15T20:06:09.000Z
|
2015-07-15T20:06:09.000Z
|
jetpack/make_hash.py
|
mattbasta/amo-validator
|
f4d9612c15508b991cad637be9062a10d5e38e53
|
[
"BSD-3-Clause"
] | null | null | null |
jetpack/make_hash.py
|
mattbasta/amo-validator
|
f4d9612c15508b991cad637be9062a10d5e38e53
|
[
"BSD-3-Clause"
] | null | null | null |
import hashlib
import os
import sys
# Print "<path> <label> <sha256>" for the file named by the first CLI argument.
# Fixes vs. the original: Python 2 `print` statement ported to Python 3 (the
# original is a SyntaxError on py3), the file handle is closed via `with`
# instead of leaking, binary mode feeds bytes to hashlib, and the variable no
# longer shadows the builtin `hash`.
with open(sys.argv[1], "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(sys.argv[1], sys.argv[2], digest)
| 19.142857
| 59
| 0.723881
| 23
| 134
| 4.217391
| 0.565217
| 0.216495
| 0.164948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.104478
| 134
| 6
| 60
| 22.333333
| 0.758333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.6
| null | null | 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
650f9308ef9c0e31000eaf3cf50b4d8e8473f446
| 203
|
py
|
Python
|
hdlogger/serializers/picklers/__init__.py
|
incognitoRepo/hdlogger
|
c738161ef3144469ba0f47caf89770613031e96e
|
[
"BSD-2-Clause"
] | null | null | null |
hdlogger/serializers/picklers/__init__.py
|
incognitoRepo/hdlogger
|
c738161ef3144469ba0f47caf89770613031e96e
|
[
"BSD-2-Clause"
] | null | null | null |
hdlogger/serializers/picklers/__init__.py
|
incognitoRepo/hdlogger
|
c738161ef3144469ba0f47caf89770613031e96e
|
[
"BSD-2-Clause"
] | null | null | null |
from dill import Pickler as dillPickler, Unpickler as dillUnpickler
from pickle import _Pickler as picklePickler, Unpickler as pickleUnpickler
from .try_until import TryUntilPickleable, FilteredPickler
| 40.6
| 74
| 0.862069
| 24
| 203
| 7.208333
| 0.625
| 0.150289
| 0.17341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118227
| 203
| 4
| 75
| 50.75
| 0.96648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
651309a313e3066d62d8adc9de14475635c7ab7d
| 118
|
py
|
Python
|
blog/admin.py
|
skylermishkin/skylernet
|
d715c69348c050d976ba7931127a576565b67ff1
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
skylermishkin/skylernet
|
d715c69348c050d976ba7931127a576565b67ff1
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
skylermishkin/skylernet
|
d715c69348c050d976ba7931127a576565b67ff1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Tag, Post
# Expose the blog models in the Django admin with default ModelAdmin options.
admin.site.register(Tag)
admin.site.register(Post)
| 13.111111
| 32
| 0.779661
| 18
| 118
| 5.111111
| 0.555556
| 0.195652
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127119
| 118
| 8
| 33
| 14.75
| 0.893204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e8e5018f6ad00d578d3491b691343b23e06ddc95
| 58
|
py
|
Python
|
lab/lab05/lab05_extra.py
|
AnthonyNg404/61A
|
6b8fc656ef5438dc45e58d49b025bc653dda8655
|
[
"Unlicense"
] | null | null | null |
lab/lab05/lab05_extra.py
|
AnthonyNg404/61A
|
6b8fc656ef5438dc45e58d49b025bc653dda8655
|
[
"Unlicense"
] | null | null | null |
lab/lab05/lab05_extra.py
|
AnthonyNg404/61A
|
6b8fc656ef5438dc45e58d49b025bc653dda8655
|
[
"Unlicense"
] | null | null | null |
""" Optional questions for Lab 05 """
from lab05 import *
| 19.333333
| 37
| 0.689655
| 8
| 58
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0.189655
| 58
| 3
| 38
| 19.333333
| 0.765957
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e8f59a899f131eb6dc9494843ad0c1355a613abc
| 16,274
|
py
|
Python
|
wfm_watcher.py
|
85599/check
|
80148733194e03f0c0459306702868b12e37e497
|
[
"MIT"
] | null | null | null |
wfm_watcher.py
|
85599/check
|
80148733194e03f0c0459306702868b12e37e497
|
[
"MIT"
] | null | null | null |
wfm_watcher.py
|
85599/check
|
80148733194e03f0c0459306702868b12e37e497
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Part of https://github.com/85599/wfm_watch.git
import argparse
import colorclass
import json
import os
import requests
import sys
import terminaltables
import time
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
print('Warframe Market Watcher')
print('https://github.com/85599/wfm_watch.git')
print('')
# Command-line interface: each flag may be repeated; every use takes one or
# more values (nargs='+' with action='append' yields a list of lists).
parser = argparse.ArgumentParser()
parser.add_argument(
    '-b', '--buyer',
    help='watch a user\'s buy orders (can specify multiple users)',
    metavar='USER',
    nargs='+',
    action='append'
)
parser.add_argument(
    '-i', '--item',
    help='watch an item\'s buy and sell orders (example: -i fleeting_expertise blind_rage streamline)',
    nargs='+',
    action='append'
)
parser.add_argument(
    '-s', '--seller',
    help='watch a user\'s sell orders (can specify multiple users)',
    metavar='USER',
    nargs='+',
    action='append'
)
args = parser.parse_args()
# With no watch targets there is nothing to do: show usage and exit.
if (args.buyer == None) and (args.item == None) and (args.seller == None):
    parser.print_help()
    sys.exit()
# Enable ANSI colour handling on Windows and reset terminal colours at exit.
colorclass.Windows.enable(auto_colors = True, reset_atexit = True)
def get_order(orders, item):
    """Record the best competing live order for *item* into the *orders* cache.

    orders is a nested cache keyed as orders[item_name][mod_rank]['buy'|'sell'];
    item is one of this user's own orders from the profile endpoint. After the
    call, the cached best buy/sell order also carries a 'previous' key holding
    the best competitor price excluding the user's own order (0 if none).
    """
    item_name = item['item']['url_name']
    mod_rank = str(item.get('mod_rank', 0))
    if not item_name in orders:
        orders[item_name] = {}
    if not mod_rank in orders[item_name]:
        orders[item_name][mod_rank] = {}
    # NOTE(review): TLS verification is disabled (verify=False) — confirm intentional.
    result = requests.get('https://api.warframe.market/v1/items/' + item_name + '/orders', verify=False, headers={'Connection': 'close'})
    data = json.loads(result.text)
    # Pass 1: among live (ingame/online) PC/en orders of the matching mod rank,
    # keep the highest-priced buy and the lowest-priced sell.
    for order in data['payload']['orders']:
        if (order['region'] == 'en') and (order['platform'] == 'pc') and (str(order.get('mod_rank', 0)) == mod_rank):
            if order['order_type'] == 'buy':
                if (order['user']['status'] == 'ingame') or (order['user']['status'] == 'online'):
                    if 'buy' in orders[item_name][mod_rank]:
                        if order['platinum'] > orders[item_name][mod_rank]['buy']['platinum']:
                            orders[item_name][mod_rank]['buy'] = order
                    else:
                        orders[item_name][mod_rank]['buy'] = order
            elif order['order_type'] == 'sell':
                if (order['user']['status'] == 'ingame') or (order['user']['status'] == 'online'):
                    if 'sell' in orders[item_name][mod_rank]:
                        if order['platinum'] < orders[item_name][mod_rank]['sell']['platinum']:
                            orders[item_name][mod_rank]['sell'] = order
                    else:
                        orders[item_name][mod_rank]['sell'] = order
    # Pass 2: collect competitor prices at-or-beyond the best found above,
    # excluding the user's own order (matched by id).
    diffs = {
        'buy': [],
        'sell': []
    }
    for order in data['payload']['orders']:
        if (order['region'] == 'en') and (order['platform'] == 'pc') and (str(order.get('mod_rank', 0)) == mod_rank):
            if order['order_type'] == 'buy':
                if (order['user']['status'] == 'ingame') or (order['user']['status'] == 'online'):
                    if order['id'] != item['id']:
                        if order['platinum'] <= orders[item_name][mod_rank]['buy']['platinum']:
                            diffs['buy'].append(order['platinum'])
            elif order['order_type'] == 'sell':
                if (order['user']['status'] == 'ingame') or (order['user']['status'] == 'online'):
                    if order['id'] != item['id']:
                        if order['platinum'] >= orders[item_name][mod_rank]['sell']['platinum']:
                            diffs['sell'].append(order['platinum'])
    # Best competitor first: highest competing bid, lowest competing ask.
    diffs['buy'].sort(reverse=True)
    diffs['sell'].sort()
    if 'buy' in orders[item_name][mod_rank]:
        orders[item_name][mod_rank]['buy']['previous'] = diffs['buy'][0] if len(diffs['buy']) > 0 else 0
    if 'sell' in orders[item_name][mod_rank]:
        orders[item_name][mod_rank]['sell']['previous'] = diffs['sell'][0] if len(diffs['sell']) > 0 else 0
def get_stats(stats, item):
    """Fill *stats* with average 48-hour / 90-day buy & sell prices for *item*.

    stats is a nested cache keyed as stats[item_name][mod_rank][bucket] with
    buckets 'buy_48_hr', 'buy_90_day', 'sell_48_hr', 'sell_90_day'. Each bucket
    is the mean avg_price over matching statistics_live entries (0 if none).
    """
    item_name = item['item']['url_name']
    mod_rank = str(item.get('mod_rank', 0))
    if not item_name in stats:
        stats[item_name] = {}
    if not mod_rank in stats[item_name]:
        stats[item_name][mod_rank] = {
            'buy_48_hr': 0,
            'buy_90_day': 0,
            'sell_48_hr': 0,
            'sell_90_day': 0
        }
    # NOTE(review): TLS verification is disabled (verify=False) — confirm intentional.
    result = requests.get('https://api.warframe.market/v1/items/' + item_name + '/statistics', verify=False, headers={'Connection': 'close'})
    data = json.loads(result.text)
    # Entry counters so the accumulated sums can be averaged below.
    b48 = 0
    b90 = 0
    s48 = 0
    s90 = 0
    for stat in data['payload']['statistics_live']['48hours']:
        # BUG FIX: the original tested order['region'] / order['platform'] here,
        # referencing a stale module-level `order` left over from the main loop
        # instead of the `stat` record being iterated. Filter on the stat itself;
        # .get with en/pc defaults keeps entries that omit region/platform.
        if (stat.get('region', 'en') == 'en') and (stat.get('platform', 'pc') == 'pc') and (str(stat.get('mod_rank', 0)) == mod_rank):
            if stat['order_type'] == 'buy':
                stats[item_name][mod_rank]['buy_48_hr'] += stat['avg_price']
                b48 += 1
            elif stat['order_type'] == 'sell':
                stats[item_name][mod_rank]['sell_48_hr'] += stat['avg_price']
                s48 += 1
    for stat in data['payload']['statistics_live']['90days']:
        # Same fix as above, applied to the 90-day bucket.
        if (stat.get('region', 'en') == 'en') and (stat.get('platform', 'pc') == 'pc') and (str(stat.get('mod_rank', 0)) == mod_rank):
            if stat['order_type'] == 'buy':
                stats[item_name][mod_rank]['buy_90_day'] += stat['avg_price']
                b90 += 1
            elif stat['order_type'] == 'sell':
                stats[item_name][mod_rank]['sell_90_day'] += stat['avg_price']
                s90 += 1
    # Turn the sums into averages; guard against empty buckets (division by zero).
    if b48 > 0:
        stats[item_name][mod_rank]['buy_48_hr'] /= b48
    if b90 > 0:
        stats[item_name][mod_rank]['buy_90_day'] /= b90
    if s48 > 0:
        stats[item_name][mod_rank]['sell_48_hr'] /= s48
    if s90 > 0:
        stats[item_name][mod_rank]['sell_90_day'] /= s90
while True:
os.system('cls') if os.name == 'nt' else os.system('clear')
stats = {}
if args.buyer:
for users in args.buyer:
for user in users:
result = requests.get('https://api.warframe.market/v1/profile/' + user + '/orders', verify=False, headers={'Connection': 'close'})
data = json.loads(result.text)
orders = {}
buy_orders = []
for order in data['payload']['buy_orders']:
if (order['region'] == 'en') and (order['platform'] == 'pc'):
item_name = order['item']['url_name']
mod_rank = str(order.get('mod_rank', 0))
get_order(orders, order)
get_stats(stats, order)
buy_orders.append(
[
colorclass.Color(order['item']['en']['item_name']),
colorclass.Color(mod_rank),
colorclass.Color(str(order['item'].get('mod_max_rank', 0))),
colorclass.Color(str(order['quantity'])),
colorclass.Color(str(int(round(stats[item_name][mod_rank]['buy_90_day']))) + 'p'),
colorclass.Color(str(int(round(stats[item_name][mod_rank]['buy_48_hr']))) + 'p'),
colorclass.Color(str(int(order['platinum'])) + 'p'),
colorclass.Color('{higreen}' + str(int(order['platinum'] - orders[item_name][mod_rank]['buy']['previous'])) + 'p diff: yes{/green}' if ((order['id'] == orders[item_name][mod_rank]['buy']['id']) or (order['platinum'] == orders[item_name][mod_rank]['buy']['platinum'])) else '{hired}' + '[+' + str(orders[item_name][mod_rank]['buy']['user']['reputation']) + '] ' + orders[item_name][mod_rank]['buy']['user']['ingame_name'] + ': ' + str(int(orders[item_name][mod_rank]['buy']['platinum'])) + 'p{/red}')
])
buy_orders.sort(key=lambda order: order[0])
buy_orders.insert(0,
[
colorclass.Color('Item'),
colorclass.Color('Rank'),
colorclass.Color('Max'),
colorclass.Color('Qty'),
colorclass.Color('90 day avg'),
colorclass.Color('48 hr avg'),
colorclass.Color('Price'),
colorclass.Color('Highest')
])
output = terminaltables.SingleTable(buy_orders, colorclass.Color(' {hicyan}' + user + '\'s bids{/cyan} '));
output.inner_heading_row_border = True
output.inner_row_border = True
output.justify_columns = {
0: 'left',
1: 'right',
4: 'right',
5: 'right',
6: 'right',
7: 'right'
}
print(output.table)
if args.item:
for items in args.item:
for item in items:
result = requests.get('https://api.warframe.market/v1/items/' + item + '/orders', verify=False, headers={'Connection': 'close'})
data = json.loads(result.text)
buy_orders = []
sell_orders = []
for order in data['payload']['orders']:
if (order['region'] == 'en') and (order['platform'] == 'pc'):
if (order['user']['status'] == 'ingame') or (order['user']['status'] == 'online'):
mod_rank = str(order.get('mod_rank', 0))
order['item'] = {
'url_name': item
}
get_stats(stats, order)
if order['order_type'] == 'buy':
buy_orders.append(
[
colorclass.Color('[+' + str(order['user']['reputation']) + '] ' + order['user']['ingame_name']),
colorclass.Color(mod_rank),
colorclass.Color(str(order['quantity'])),
colorclass.Color(str(int(round(stats[item][mod_rank]['buy_90_day']))) + 'p'),
colorclass.Color(str(int(round(stats[item][mod_rank]['buy_48_hr']))) + 'p'),
colorclass.Color(str(int(order['platinum']))),
])
elif order['order_type'] == 'sell':
sell_orders.append(
[
colorclass.Color('[+' + str(order['user']['reputation']) + '] ' + order['user']['ingame_name']),
colorclass.Color(mod_rank),
colorclass.Color(str(order['quantity'])),
colorclass.Color(str(int(round(stats[item][mod_rank]['sell_90_day']))) + 'p'),
colorclass.Color(str(int(round(stats[item][mod_rank]['sell_48_hr']))) + 'p'),
colorclass.Color(str(int(order['platinum']))),
])
buy_orders.sort(key=lambda order: order[5], reverse=True)
for order in buy_orders:
order[5] = colorclass.Color(str(order[5]) + 'p')
buy_orders.insert(0,
[
colorclass.Color('User'),
colorclass.Color('Rank'),
colorclass.Color('Qty'),
colorclass.Color('90 day avg'),
colorclass.Color('48 hr avg'),
colorclass.Color('Price'),
])
sell_orders.sort(key=lambda order: order[5])
for order in sell_orders:
order[5] = colorclass.Color(str(order[5]) + 'p')
sell_orders.insert(0,
[
colorclass.Color('User'),
colorclass.Color('Rank'),
colorclass.Color('Qty'),
colorclass.Color('90 day avg'),
colorclass.Color('48 hr avg'),
colorclass.Color('Price'),
])
output = terminaltables.SingleTable(buy_orders, colorclass.Color(' {hicyan}Bids for ' + item + '{/cyan} '));
output.inner_heading_row_border = True
output.inner_row_border = True
output.justify_columns = {
1: 'right',
3: 'right',
4: 'right',
5: 'right'
}
print(output.table)
output = terminaltables.SingleTable(sell_orders, colorclass.Color(' {hicyan}Sales for ' + item + '{/cyan} '));
output.inner_heading_row_border = True
output.inner_row_border = True
output.justify_columns = {
1: 'right',
3: 'right',
4: 'right',
5: 'right'
}
print(output.table)
if args.seller:
for users in args.seller:
for user in users:
result = requests.get('https://api.warframe.market/v1/profile/' + user + '/orders', verify=False, headers={'Connection': 'close'})
data = json.loads(result.text)
orders = {}
sell_orders = []
for order in data['payload']['sell_orders']:
if (order['region'] == 'en') and (order['platform'] == 'pc'):
item_name = order['item']['url_name']
mod_rank = str(order.get('mod_rank', 0))
get_order(orders, order)
get_stats(stats, order)
sell_orders.append(
[
colorclass.Color(order['item']['en']['item_name']),
colorclass.Color(mod_rank),
colorclass.Color(str(order['item'].get('mod_max_rank', 0))),
colorclass.Color(str(order['quantity'])),
colorclass.Color(str(int(round(stats[item_name][mod_rank]['sell_90_day']))) + 'p'),
colorclass.Color(str(int(round(stats[item_name][mod_rank]['sell_48_hr']))) + 'p'),
colorclass.Color(str(int(order['platinum'])) + 'p'),
colorclass.Color('{higreen}' + str(orders[item_name][mod_rank]['sell']['previous'] - order['platinum']) + 'p diff: yes{/green}' if ((order['id'] == orders[item_name][mod_rank]['sell']['id']) or (order['platinum'] == orders[item_name][mod_rank]['sell']['platinum'])) else '{hired}' + '[+' + str(orders[item_name][mod_rank]['sell']['user']['reputation']) + '] ' + orders[item_name][mod_rank]['sell']['user']['ingame_name'] + ': ' + str(orders[item_name][mod_rank]['sell']['platinum']) + 'p{/red}')
])
sell_orders.sort(key=lambda order: order[0])
sell_orders.insert(0,
[
colorclass.Color('Item'),
colorclass.Color('Rank'),
colorclass.Color('Max'),
colorclass.Color('Qty'),
colorclass.Color('90 day avg'),
colorclass.Color('48 hr avg'),
colorclass.Color('Price'),
colorclass.Color('Lowest')
])
output = terminaltables.SingleTable(sell_orders, colorclass.Color(' {hicyan}' + user + '\'s sales{/cyan} '));
output.inner_heading_row_border = True
output.inner_row_border = True
output.justify_columns = {
0: 'left',
1: 'right',
4: 'right',
5: 'right',
6: 'right',
7: 'right'
}
print(output.table)
time.sleep(300)
| 41.835476
| 527
| 0.480828
| 1,698
| 16,274
| 4.456419
| 0.107185
| 0.062905
| 0.063962
| 0.079292
| 0.840756
| 0.823312
| 0.807321
| 0.715475
| 0.645302
| 0.613717
| 0
| 0.01703
| 0.357749
| 16,274
| 388
| 528
| 41.943299
| 0.706946
| 0.003994
| 0
| 0.571895
| 0
| 0.003268
| 0.147837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006536
| false
| 0
| 0.026144
| 0
| 0.03268
| 0.026144
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
33035b63348e4f380ff2ae25c4a0c855bf4c52d6
| 69
|
py
|
Python
|
checkov/common/bridgecrew/integration_features/__init__.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
checkov/common/bridgecrew/integration_features/__init__.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
checkov/common/bridgecrew/integration_features/__init__.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
from checkov.common.bridgecrew.integration_features.features import *
| 69
| 69
| 0.884058
| 8
| 69
| 7.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 69
| 1
| 69
| 69
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
33180f661271e296a9950f6788a3e4257e578be6
| 42
|
py
|
Python
|
src/server_design/algorithms/compressor/designSolutions/sol_562.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | 1
|
2021-05-18T16:10:49.000Z
|
2021-05-18T16:10:49.000Z
|
src/server_design/algorithms/compressor/designSolutions/sol_562.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
src/server_design/algorithms/compressor/designSolutions/sol_562.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
def sol562(design_parameters):
    """Placeholder for design solution #562; accepts the design parameters and
    currently does nothing (returns None). Implementation pending."""
    pass
| 14
| 31
| 0.714286
| 5
| 42
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.214286
| 42
| 3
| 32
| 14
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
3318bc1d0c998c22a4432ff7643567b236ac3e5c
| 125
|
py
|
Python
|
escrutiniosocial/core/admin.py
|
juanmafx/escrutiniosocial
|
6db28cece5b9860e0a522a70eb34b2887b8396d6
|
[
"BSD-3-Clause"
] | 1
|
2015-05-15T18:08:54.000Z
|
2015-05-15T18:08:54.000Z
|
escrutiniosocial/core/admin.py
|
juanmafx/escrutiniosocial
|
6db28cece5b9860e0a522a70eb34b2887b8396d6
|
[
"BSD-3-Clause"
] | null | null | null |
escrutiniosocial/core/admin.py
|
juanmafx/escrutiniosocial
|
6db28cece5b9860e0a522a70eb34b2887b8396d6
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from core.models import Eleccion, Opcion
# Register the election models with the Django admin.
# BUG FIX: the original called admin.register(Model) as a bare statement —
# that only *returns* a class decorator (meant for @admin.register(Model)
# above a ModelAdmin) and registers nothing. admin.site.register(...) performs
# the actual registration with the default ModelAdmin.
admin.site.register(Eleccion)
admin.site.register(Opcion)
| 15.625
| 40
| 0.816
| 17
| 125
| 6
| 0.588235
| 0.254902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112
| 125
| 7
| 41
| 17.857143
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
332038c7b084687d08a31f2b53249472e4537a26
| 61
|
py
|
Python
|
app/controle/saida.py
|
jaddmn/utilitarios-para-fundacoes-profundas
|
749aea0fef6de62d1f18492a47697823ae014ac0
|
[
"MIT"
] | null | null | null |
app/controle/saida.py
|
jaddmn/utilitarios-para-fundacoes-profundas
|
749aea0fef6de62d1f18492a47697823ae014ac0
|
[
"MIT"
] | null | null | null |
app/controle/saida.py
|
jaddmn/utilitarios-para-fundacoes-profundas
|
749aea0fef6de62d1f18492a47697823ae014ac0
|
[
"MIT"
] | null | null | null |
class output:
    """Minimal output-controller base class (lowercase name kept for callers)."""

    def __init__(self):
        # BUG FIX: the original signature was `def __init__():` with no `self`,
        # so instantiating `output()` raised TypeError. `self` is required for
        # instance methods; super().__init__() keeps cooperative initialization.
        super().__init__()
| 15.25
| 26
| 0.57377
| 6
| 61
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278689
| 61
| 3
| 27
| 20.333333
| 0.613636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
33699993863e892f61f6880e06bc0c9d1be33565
| 300
|
py
|
Python
|
src/fastapi_aad_auth/__init__.py
|
Alex-Chekh/fastapi_aad_auth
|
4ad21fa76e7422da5d0799695bb547cd3e6224e0
|
[
"MIT"
] | 29
|
2020-09-04T08:39:42.000Z
|
2022-01-21T08:43:48.000Z
|
src/fastapi_aad_auth/__init__.py
|
Alex-Chekh/fastapi_aad_auth
|
4ad21fa76e7422da5d0799695bb547cd3e6224e0
|
[
"MIT"
] | 86
|
2020-07-30T20:51:19.000Z
|
2022-03-30T16:55:24.000Z
|
src/fastapi_aad_auth/__init__.py
|
Alex-Chekh/fastapi_aad_auth
|
4ad21fa76e7422da5d0799695bb547cd3e6224e0
|
[
"MIT"
] | 11
|
2020-10-16T07:17:16.000Z
|
2022-02-09T17:13:55.000Z
|
from fastapi_aad_auth.auth import Authenticator # noqa F401
from fastapi_aad_auth.config import Config # noqa F401
from fastapi_aad_auth._base.state import AuthenticationState # noqa F401
from fastapi_aad_auth._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 37.5
| 73
| 0.836667
| 43
| 300
| 5.44186
| 0.372093
| 0.188034
| 0.239316
| 0.307692
| 0.333333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0.033835
| 0.113333
| 300
| 7
| 74
| 42.857143
| 0.845865
| 0.096667
| 0
| 0
| 0
| 0
| 0.026217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6827a4b1d04b7345edcb7fe4f9ba44a507f47a7c
| 168
|
py
|
Python
|
utils/lr.py
|
icmlsubmission-spec/spec-gnn
|
450835af23dc8f95181dc42f38046bb51d77d05b
|
[
"MIT"
] | null | null | null |
utils/lr.py
|
icmlsubmission-spec/spec-gnn
|
450835af23dc8f95181dc42f38046bb51d77d05b
|
[
"MIT"
] | null | null | null |
utils/lr.py
|
icmlsubmission-spec/spec-gnn
|
450835af23dc8f95181dc42f38046bb51d77d05b
|
[
"MIT"
] | null | null | null |
def warm_up_lr(batch, num_batch_warm_up, init_lr, optimizer):
for params in optimizer.param_groups:
params['lr'] = batch**3 * init_lr / num_batch_warm_up**3
| 56
| 64
| 0.72619
| 29
| 168
| 3.827586
| 0.482759
| 0.162162
| 0.216216
| 0.252252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 0.160714
| 168
| 3
| 64
| 56
| 0.77305
| 0
| 0
| 0
| 0
| 0
| 0.011834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
683be215d102da8a26236f31a555a01901a616b0
| 169
|
py
|
Python
|
clutils/strategies/__init__.py
|
AndreaCossu/ContinualLearning_RecurrentNetworks
|
8cbc247f1f660f7acb94868696d128e538ad72f4
|
[
"MIT"
] | 2
|
2021-05-27T14:43:11.000Z
|
2021-05-28T00:47:05.000Z
|
clutils/strategies/__init__.py
|
AndreaCossu/ContinualLearning_RecurrentNetworks
|
8cbc247f1f660f7acb94868696d128e538ad72f4
|
[
"MIT"
] | null | null | null |
clutils/strategies/__init__.py
|
AndreaCossu/ContinualLearning_RecurrentNetworks
|
8cbc247f1f660f7acb94868696d128e538ad72f4
|
[
"MIT"
] | null | null | null |
from .ewc.EWC import EWC
from .mas.MAS import MAS
from .lwf.LWF import LWF
from .gem.GEM import GEM, AGEM
from .rehearsal.rehearsal import Rehearsal
from . import utils
| 24.142857
| 42
| 0.781065
| 29
| 169
| 4.551724
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147929
| 169
| 6
| 43
| 28.166667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.