hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
119bf70de472ba3cab193d2bf98c10883d62420b
| 30
|
py
|
Python
|
build/lib/AccountManager/test doc.py
|
maartenelgar/Block_Fund_Trading
|
0ced0f4ac5bb8785ca1b75e55dee7df1db5030a8
|
[
"MIT"
] | null | null | null |
build/lib/AccountManager/test doc.py
|
maartenelgar/Block_Fund_Trading
|
0ced0f4ac5bb8785ca1b75e55dee7df1db5030a8
|
[
"MIT"
] | 4
|
2020-03-24T16:17:31.000Z
|
2021-06-01T22:48:07.000Z
|
build/lib/AccountManager/test doc.py
|
maartenelgar/Block_Fund_Trading
|
0ced0f4ac5bb8785ca1b75e55dee7df1db5030a8
|
[
"MIT"
] | null | null | null |
from Keys import keys
| 6
| 23
| 0.6
| 4
| 30
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 30
| 4
| 24
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
11ffa6aa23050da19bff683dc02e8ea752daf62c
| 161
|
py
|
Python
|
by-session/class-921/week2/input-output.py
|
amiraliakbari/sharif-mabani-python
|
5d14a08d165267fe71c28389ddbafe29af7078c5
|
[
"MIT"
] | 2
|
2015-04-29T20:59:35.000Z
|
2018-09-26T13:33:43.000Z
|
by-session/class-921/week2/input-output.py
|
amiraliakbari/sharif-mabani-python
|
5d14a08d165267fe71c28389ddbafe29af7078c5
|
[
"MIT"
] | null | null | null |
by-session/class-921/week2/input-output.py
|
amiraliakbari/sharif-mabani-python
|
5d14a08d165267fe71c28389ddbafe29af7078c5
|
[
"MIT"
] | null | null | null |
a = input("?")
print a
b = raw_input("?")
print b
c = raw_input("please input an integer")
print int(c)
d = raw_input("please input an float")
print float(d)
| 13.416667
| 40
| 0.658385
| 29
| 161
| 3.551724
| 0.413793
| 0.23301
| 0.271845
| 0.368932
| 0.407767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180124
| 161
| 11
| 41
| 14.636364
| 0.780303
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ee9a1158727d2281c8375b20ff0b934706e0a84e
| 43
|
py
|
Python
|
__init__.py
|
manishrawat4u/plugin.video.bloimediaplayer
|
d561c095fd0862bbe21620daef80d0c5fde36ca5
|
[
"MIT"
] | 1
|
2019-01-27T23:49:49.000Z
|
2019-01-27T23:49:49.000Z
|
__init__.py
|
manishrawat4u/plugin.video.bloimediaplayer
|
d561c095fd0862bbe21620daef80d0c5fde36ca5
|
[
"MIT"
] | null | null | null |
__init__.py
|
manishrawat4u/plugin.video.bloimediaplayer
|
d561c095fd0862bbe21620daef80d0c5fde36ca5
|
[
"MIT"
] | null | null | null |
print('calling init from home directory')
| 14.333333
| 41
| 0.767442
| 6
| 43
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 2
| 42
| 21.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e107c95e09a7b7a3ece413c2cae93649bc1ece1d
| 254
|
py
|
Python
|
goodsongs/errors.py
|
italopaiva/goodsongs
|
90182e5b372e9736517989b74ca637e02b52a688
|
[
"BSD-3-Clause"
] | null | null | null |
goodsongs/errors.py
|
italopaiva/goodsongs
|
90182e5b372e9736517989b74ca637e02b52a688
|
[
"BSD-3-Clause"
] | null | null | null |
goodsongs/errors.py
|
italopaiva/goodsongs
|
90182e5b372e9736517989b74ca637e02b52a688
|
[
"BSD-3-Clause"
] | null | null | null |
"""Module to define specific errors."""
class NotFoundError(ValueError):
"""Raised when some application object could not be found."""
class InvalidRecordError(ValueError):
"""Raised when some application object did not passed validation."""
| 25.4
| 72
| 0.740157
| 29
| 254
| 6.482759
| 0.724138
| 0.170213
| 0.212766
| 0.255319
| 0.43617
| 0.43617
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153543
| 254
| 9
| 73
| 28.222222
| 0.874419
| 0.598425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
e123321d7b022876f6d68813b71b9d34fc114dea
| 58,372
|
py
|
Python
|
tests/regressiontests/livre_journal.py
|
Starou/Colbert
|
a2b4abaebe7e0606f90c09b98c9267e76d3fd3fc
|
[
"BSD-3-Clause"
] | 1
|
2015-09-30T20:18:14.000Z
|
2015-09-30T20:18:14.000Z
|
tests/regressiontests/livre_journal.py
|
Starou/Colbert
|
a2b4abaebe7e0606f90c09b98c9267e76d3fd3fc
|
[
"BSD-3-Clause"
] | 6
|
2015-01-17T10:02:12.000Z
|
2020-05-09T15:19:55.000Z
|
tests/regressiontests/livre_journal.py
|
Starou/Colbert
|
a2b4abaebe7e0606f90c09b98c9267e76d3fd3fc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import unittest
import codecs
import io
import datetime
import json
from colbert.livre_journal import livre_journal_to_list
from decimal import Decimal
CURRENT_DIR = os.path.dirname(__file__)
VERSION_INFO = sys.version_info
class LivreJournalTestCase(unittest.TestCase):
def test_check_livre_journal(self):
from colbert.livre_journal import check_livre_journal
livre_journal = codecs.open(os.path.join(CURRENT_DIR, "livre-journal.txt"),
mode="r", encoding="utf-8")
result = check_livre_journal(livre_journal)
self.assertEqual(result, [
['18/03/2011 - Frais de constitution de la société CFE Paris.',
'OK : débit = crédit (90.45).'],
['18/03/2011 - Frais de constitution de la société - Annonce légale.',
'OK : débit = crédit (99.00).'],
['31/03/2011 - Facture 2011-01 MyClient1',
'OK : débit = crédit (5 980.00).'],
['01/04/2011 - Résultat arrêté compte',
'OK : débit = crédit (48.00).'],
['02/04/2011 - Capital initial', 'OK : débit = crédit (1 500.00).'],
['04/04/2011 - Facture 2011-02 MyClient2',
'OK : débit = crédit (1 794.00).'],
['28/04/2011 - Cotisation Option PRO LCL',
'OK : débit = crédit (15.00).'],
['02/05/2011 - Abonnement LCL Access',
'OK : débit = crédit (3.00).'],
['11/06/2011 - BHV - Fournitures des bureau (Livres comptables).',
'OK : débit = crédit (25.65).'],
['15/06/2011 - Remise chèque XXXXXXX règlement facture 2011-02',
'OK : débit = crédit (2 088.00).'],
['05/07/2011 - Traitement mois de juin gérant.',
'OK : débit = crédit (3 000.00).'],
['08/08/2011 - Chèque XXXXXXY', 'OK : débit = crédit (393.00).'],
['02/09/2011 - Virement MyClient1 ZZZZZZZZZZZ',
'OK : débit = crédit (6 960.00).'],
['03/09/2011 - Abonnement LCL Access',
'OK : débit = crédit (3.00).'],
['28/09/2011 - Facture 2011-04 MyClient1',
'OK : débit = crédit (13 156.00).'],
['30/09/2011 - Solde des comptes de TVA du 01/03/2011 au 30/09/2011',
'OK : débit = crédit (1 274.00).'],
['06/10/2011 - Chèque WWWWWWW',
'OK : débit = crédit (1 240.00).'],
['01/11/2011 - Facture 2011-05 MyClient1',
'OK : débit = crédit (5 382.00).'],
['17/11/2011 - Chèque ZZZZZZZ', 'OK : débit = crédit (402.00).'],
['01/12/2011 - Abonnement LCL Access',
'OK : débit = crédit (3.00).'],
['01/12/2011 - Virement MyClient1 WWWWWWWWWW',
'OK : débit = crédit (21 576.00).'],
['01/12/2011 - Facture 2011-06 MyClient3',
'OK : débit = crédit (8 372.00).'],
['31/12/2011 - Solde des comptes de TVA du 01/10/2011 au 31/12/2011',
'OK : débit = crédit (3 038.00).'],
['31/12/2011 - Prestation MyClient1 décembre 2011',
'OK : débit = crédit (13 156.00).'],
['01/01/2012 - Prestation MyClient1 décembre 2011',
'OK : débit = crédit (13 156.00).'],
['03/01/2012 - Facture 2012-01 MyClient1',
'OK : débit = crédit (13 156.00).'],
["01/02/2012 - Restaurant La Tour d'argent",
'OK : débit = crédit (49.80).']
])
def test_check_ecriture_livre_journal(self):
from colbert.livre_journal import check_ecriture_livre_journal
ecriture = {
'date': datetime.date(2011, 3, 18),
'numero_ligne_debut': 13,
'numero_ligne_fin': 17,
'intitule': [' Frais de constitution de la société CFE Paris.'],
'ecritures': [
{
'credit': Decimal('0.00'),
'debit': Decimal('80.00'),
'nom_compte': "Achats - Frais d'actes et de contentieux",
'numero_compte_credit': '',
'numero_compte_debit': '6227'
},
{
'credit': Decimal('0.00'),
'debit': Decimal('10.45'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'
},
{
'credit': Decimal('90.45'),
'debit': Decimal('0.00'),
'nom_compte': 'Associés - Comptes courants',
'numero_compte_credit': '455',
'numero_compte_debit': ''
}
],
}
self.assertEqual(
check_ecriture_livre_journal(ecriture),
['18/03/2011 - Frais de constitution de la société CFE Paris.', 'OK : débit = crédit (90.45).']
)
# Erreur dans la colonne du compte.
ecriture = {
'date': datetime.date(2011, 3, 18),
'numero_ligne_debut': 13,
'numero_ligne_fin': 17,
'intitule': [' Frais de constitution de la société CFE Paris.'],
'ecritures': [
{
'credit': Decimal('0.00'),
'debit': Decimal('80.00'),
'nom_compte': "Achats - Frais d'actes et de contentieux",
'numero_compte_credit': '',
'numero_compte_debit': '6227'
},
{
'credit': Decimal('0.00'),
'debit': Decimal('10.45'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'
},
{
'credit': Decimal('90.45'),
'debit': Decimal('0.00'),
'nom_compte': 'Associés - Comptes courants',
'numero_compte_credit': '',
'numero_compte_debit': '455'
}
],
}
self.assertEqual(
check_ecriture_livre_journal(ecriture),
['18/03/2011 - Frais de constitution de la société CFE Paris.',
'ERREUR : incohérence entre les colonnes numéro de compte et montant']
)
ecriture = {
'date': datetime.date(2011, 3, 18),
'numero_ligne_debut': 13,
'numero_ligne_fin': 17,
'intitule': [' Frais de constitution de la société CFE Paris.'],
'ecritures': [
{
'credit': Decimal('0.00'),
'debit': Decimal('80.00'),
'nom_compte': "Achats - Frais d'actes et de contentieux",
'numero_compte_credit': '6227',
'numero_compte_debit': ''
},
{
'credit': Decimal('0.00'),
'debit': Decimal('10.45'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'
},
{
'credit': Decimal('90.45'),
'debit': Decimal('0.00'),
'nom_compte': 'Associés - Comptes courants',
'numero_compte_credit': '455',
'numero_compte_debit': ''
}
],
}
self.assertEqual(
check_ecriture_livre_journal(ecriture),
['18/03/2011 - Frais de constitution de la société CFE Paris.',
'ERREUR : incohérence entre les colonnes numéro de compte et montant']
)
def test_ecritures_de_cloture(self):
from colbert.livre_journal import ecritures_de_cloture
balance_des_comptes = codecs.open(os.path.join(CURRENT_DIR, "balance_des_comptes-2011.json"),
mode="r", encoding="utf-8")
edc = ecritures_de_cloture(json.loads(balance_des_comptes.read()))
self.maxDiff = None
self.assertEqual(
edc,
[{'date': datetime.date(2011, 12, 31),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('40000.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '706'},
{'credit': Decimal('0.00'),
'debit': Decimal('0.34'),
'nom_compte': 'Produits divers de gestion courante',
'numero_compte_credit': '',
'numero_compte_debit': '758'},
{'credit': Decimal('40000.34'),
'debit': Decimal('0.0'),
'nom_compte': 'Regroupement des comptes de produits',
'numero_compte_credit': '127',
'numero_compte_debit': ''}],
'intitule': ['Ecritures de clôture des comptes.']},
{'date': datetime.date(2011, 12, 31),
'ecritures': [{'credit': Decimal('0.0'),
'debit': Decimal('4048.44'),
'nom_compte': 'Regroupement des comptes de charges',
'numero_compte_credit': '',
'numero_compte_debit': '126'},
{'credit': Decimal('21.44'),
'debit': Decimal('0.00'),
'nom_compte': 'Achats - Fournitures de bureau',
'numero_compte_credit': '60225',
'numero_compte_debit': ''},
{'credit': Decimal('160.00'),
'debit': Decimal('0.00'),
'nom_compte': "Achats - Frais d'actes et de contentieux",
'numero_compte_credit': '6227',
'numero_compte_debit': ''},
{'credit': Decimal('72.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '6278-LCL',
'numero_compte_debit': ''},
{'credit': Decimal('3000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Charges - Salaires et appointements',
'numero_compte_credit': '6411',
'numero_compte_debit': ''},
{'credit': Decimal('393.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Charges - cotisations RSI',
'numero_compte_credit': '6411-RSI',
'numero_compte_debit': ''},
{'credit': Decimal('161.80'),
'debit': Decimal('0.00'),
'nom_compte': 'Charges - cotisations URSSAF - Allocations familliales',
'numero_compte_credit': '6411-URSF1',
'numero_compte_debit': ''},
{'credit': Decimal('153.31'),
'debit': Decimal('0.00'),
'nom_compte': 'Charges - cotisations URSSAF - CSG/RDS déductible',
'numero_compte_credit': '6411-URSF2',
'numero_compte_debit': ''},
{'credit': Decimal('86.89'),
'debit': Decimal('0.00'),
'nom_compte': 'Charges - cotisations URSSAF - CSG/RDS non-déductible',
'numero_compte_credit': '6411-URSF3',
'numero_compte_debit': ''}],
'intitule': ['Ecritures de clôture des comptes.']},
{'date': datetime.date(2011, 12, 31),
'ecritures': [{'credit': Decimal('0.0'),
'debit': Decimal('40000.34'),
'nom_compte': 'Regroupement des comptes de produits',
'numero_compte_credit': '',
'numero_compte_debit': '127'},
{'credit': Decimal('4048.44'),
'debit': Decimal('0.0'),
'nom_compte': 'Regroupement des comptes de charges',
'numero_compte_credit': '126',
'numero_compte_debit': ''},
{'credit': Decimal('35951.90'),
'debit': Decimal('0.0'),
'nom_compte': "résultat de l'exercice (bénéfice)",
'numero_compte_credit': '120',
'numero_compte_debit': ''}],
'intitule': ["Enregistrement du résultat net de l'exercice"]}]
)
def test_ecritures_to_livre_journal(self):
from colbert.livre_journal import ecritures_to_livre_journal
ecritures = codecs.open(os.path.join(CURRENT_DIR, "ecritures_de_cloture-2011.json"),
mode="r", encoding="utf-8")
output = io.StringIO()
ecritures_to_livre_journal(json.loads(ecritures.read()), output)
self.maxDiff = None
self.assertEqual(output.getvalue(),
"""+---------------------------------------------------------------------------------------------------------------------------------------------------------+
| Ecritures pour le Livre-journal |
+=============+=================+=================+==============================================================+=================+=================+====+
|| 31/12/2011 || || || Ecritures de clôture des comptes. || || | |
|| || 706 || || Produits - prestations de services || 40 000.00 || | |
|| || 758 || || Produits divers de gestion courante || 0.34 || | |
|| || || 127 || Regroupement des comptes de produits || || 40 000.34 | |
+-------------+-----------------+-----------------+--------------------------------------------------------------+-----------------+-----------------+----+
|| 31/12/2011 || || || Ecritures de clôture des comptes. || || | |
|| || 126 || || Regroupement des comptes de charges || 4 048.44 || | |
|| || || 60225 || Achats - Fournitures de bureau || || 21.44 | |
|| || || 6227 || Achats - Frais d'actes et de contentieux || || 160.00 | |
|| || || 6278-LCL || Autres frais de commission sur prestations de services || || 72.00 | |
|| || || 6411 || Charges - Salaires et appointements || || 3 000.00 | |
|| || || 6411-RSI || Charges - cotisations RSI || || 393.00 | |
|| || || 6411-URSF1 || Charges - cotisations URSSAF - Allocations familliales || || 161.80 | |
|| || || 6411-URSF2 || Charges - cotisations URSSAF - CSG/RDS déductible || || 153.31 | |
|| || || 6411-URSF3 || Charges - cotisations URSSAF - CSG/RDS non-déductible || || 86.89 | |
+-------------+-----------------+-----------------+--------------------------------------------------------------+-----------------+-----------------+----+
|| 31/12/2011 || || || Enregistrement du résultat net de l'exercice || || | |
|| || 127 || || Regroupement des comptes de produits || 40 000.34 || | |
|| || || 126 || Regroupement des comptes de charges || || 4 048.44 | |
|| || || 120 || résultat de l'exercice (bénéfice) || || 35 951.90 | |
+-------------+-----------------+-----------------+--------------------------------------------------------------+-----------------+-----------------+----+
""")
def test_get_solde_compte(self):
from colbert.livre_journal import get_solde_compte
livre_journal = codecs.open(os.path.join(CURRENT_DIR, "livre-journal.txt"),
mode="r", encoding="utf-8")
livre_journal_list = livre_journal_to_list(livre_journal)
date_debut = datetime.date(2011, 1, 1)
date_fin = datetime.date(2011, 12, 31)
debit, credit = get_solde_compte(livre_journal_list, "512", date_debut, date_fin)
# TODO verifier.
self.assertEqual(debit, Decimal("22679.35"))
self.assertEqual(credit, Decimal("0.00"))
def test_livre_journal_to_list(self):
from colbert.livre_journal import RX_DATE_INTITULE, RX_SUITE_INTITULE, RX_ECRITURE
self.maxDiff = None
# Première ligne d'écriture.
s = "|| 31/03/2011 || || || Facture 2011-01 AdenClassifieds || || | | "
if VERSION_INFO >= (2, 7):
self.assertRegex(s, RX_DATE_INTITULE)
m = RX_DATE_INTITULE.match(s)
self.assertEqual(m.groupdict(), {'intitule': ' Facture 2011-01 AdenClassifieds ',
'credit': ' ',
'debit': ' ',
'date': '31/03/2011',
'numero_compte_credit': ' ',
'numero_compte_debit': ' ',
'checked': ' '})
# Ligne supplementaire d'intitulé.
s = "|| || || || suite et fin de l'intitulé || || | |"
if VERSION_INFO >= (2, 7):
self.assertRegex(s, RX_SUITE_INTITULE)
m = RX_SUITE_INTITULE.match(s)
# Ecriture.
s = "|| || 4111-clie || || Clients - ventes de biens ou prestations de services || 8 372 || | |"
if VERSION_INFO >= (2, 7):
self.assertRegex(s, RX_ECRITURE)
m = RX_ECRITURE.match(s)
self.assertEqual(m.groupdict(), {'nom_compte': 'Clients - ventes de biens ou prestations de services ',
'credit': ' ',
'debit': ' 8 372 ',
'date': ' ',
'numero_compte_credit': ' ',
'numero_compte_debit': ' 4111-clie ',
'checked': ' '})
# Conversion du livre journal.
livre_journal = codecs.open(os.path.join(CURRENT_DIR, "livre-journal.txt"),
mode="r", encoding="utf-8")
livre_journal_list = livre_journal_to_list(livre_journal)
self.assertEqual(livre_journal_list, [
{'date': datetime.date(2011, 3, 18),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('80.00'),
'nom_compte': "Achats - Frais d'actes et de contentieux",
'numero_compte_credit': '',
'numero_compte_debit': '6227'},
{'credit': Decimal('0.00'),
'debit': Decimal('10.45'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'},
{'credit': Decimal('90.45'),
'debit': Decimal('0.00'),
'nom_compte': 'Associés - Comptes courants',
'numero_compte_credit': '455',
'numero_compte_debit': ''}],
'intitule': [' Frais de constitution de la société CFE Paris.'],
'numero_ligne_debut': 13,
'numero_ligne_fin': 17},
{'date': datetime.date(2011, 3, 18),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('80.00'),
'nom_compte': "Achats - Frais d'actes et de contentieux MONTANT à vérifier",
'numero_compte_credit': '',
'numero_compte_debit': '6227'},
{'credit': Decimal('0.00'),
'debit': Decimal('19.00'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'},
{'credit': Decimal('99.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Associés - Comptes courants',
'numero_compte_credit': '455',
'numero_compte_debit': ''}],
'intitule': [' Frais de constitution de la société - Annonce légale.'],
'numero_ligne_debut': 18,
'numero_ligne_fin': 22},
{'date': datetime.date(2011, 3, 31),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('5980.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '4111-CL1'},
{'credit': Decimal('5000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('980.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Facture 2011-01 MyClient1',
' Prestation MyClient1 mars 2011'],
'numero_ligne_debut': 23,
'numero_ligne_fin': 28},
{'date': datetime.date(2011, 4, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('48.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('48.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Résultat arrêté compte'],
'numero_ligne_debut': 31,
'numero_ligne_fin': 34},
{'date': datetime.date(2011, 4, 2),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('1500.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '',
'numero_compte_debit': '512'},
{'credit': Decimal('1500.00'),
'debit': Decimal('0.00'),
'nom_compte': "Capital et compte de l'exploitant",
'numero_compte_credit': '100',
'numero_compte_debit': ''}],
'intitule': [' Capital initial',
' Dépôt de 1500 € par Stanislas Guerra',
' au LCL Ledru Rollin'],
'numero_ligne_debut': 35,
'numero_ligne_fin': 40},
{'date': datetime.date(2011, 4, 4),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('1794.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '4111-CL2'},
{'credit': Decimal('1500.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('294.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Facture 2011-02 MyClient2', ' Prestation MyClient2'],
'numero_ligne_debut': 41,
'numero_ligne_fin': 46},
{'date': datetime.date(2011, 4, 28),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('15.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('15.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Cotisation Option PRO LCL'],
'numero_ligne_debut': 47,
'numero_ligne_fin': 50},
{'date': datetime.date(2011, 5, 2),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('3.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Abonnement LCL Access'],
'numero_ligne_debut': 53,
'numero_ligne_fin': 56},
{'date': datetime.date(2011, 6, 11),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('21.44'),
'nom_compte': 'Achats - Fournitures de bureau',
'numero_compte_credit': '',
'numero_compte_debit': '60225'},
{'credit': Decimal('0.00'),
'debit': Decimal('4.21'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'},
{'credit': Decimal('25.65'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' BHV - Fournitures des bureau (Livres comptables).'],
'numero_ligne_debut': 59,
'numero_ligne_fin': 63},
{'date': datetime.date(2011, 6, 15),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('1794.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '',
'numero_compte_debit': '512'},
{'credit': Decimal('0.00'),
'debit': Decimal('294.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '',
'numero_compte_debit': '44587'},
{'credit': Decimal('1794.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '4111-CL2',
'numero_compte_debit': ''},
{'credit': Decimal('294.00'),
'debit': Decimal('0.00'),
'nom_compte': 'T.V.A. Collectée',
'numero_compte_credit': '44571',
'numero_compte_debit': ''}],
'intitule': [' Remise chèque XXXXXXX règlement facture 2011-02'],
'numero_ligne_debut': 64,
'numero_ligne_fin': 69},
{'date': datetime.date(2011, 7, 5),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3000.00'),
'nom_compte': 'Charges - Salaires et appointements',
'numero_compte_credit': '',
'numero_compte_debit': '6411'},
{'credit': Decimal('3000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Traitement mois de juin gérant.'],
'numero_ligne_debut': 72,
'numero_ligne_fin': 75},
{'date': datetime.date(2011, 8, 8),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('393.00'),
'nom_compte': 'Charges - cotisations RSI',
'numero_compte_credit': '',
'numero_compte_debit': '6411-RSI'},
{'credit': Decimal('393.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Chèque XXXXXXY',
' Cotisation trimestrielle RSI/Prévadiès.'],
'numero_ligne_debut': 78,
'numero_ligne_fin': 82},
{'date': datetime.date(2011, 9, 2),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('5980.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '',
'numero_compte_debit': '512'},
{'credit': Decimal('0.00'),
'debit': Decimal('980.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '',
'numero_compte_debit': '44587'},
{'credit': Decimal('5980.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '4111-CL1',
'numero_compte_debit': ''},
{'credit': Decimal('980.00'),
'debit': Decimal('0.00'),
'nom_compte': 'T.V.A. Collectée',
'numero_compte_credit': '44571',
'numero_compte_debit': ''}],
'intitule': [' Virement MyClient1 ZZZZZZZZZZZ', ' Facture 2011-01'],
'numero_ligne_debut': 85,
'numero_ligne_fin': 91},
{'date': datetime.date(2011, 9, 3),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('3.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Abonnement LCL Access'],
'numero_ligne_debut': 92,
'numero_ligne_fin': 95},
{'date': datetime.date(2011, 9, 28),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('13156.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '4111-CL1'},
{'credit': Decimal('11000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('2156.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Facture 2011-04 MyClient1', ' Prestation aout 2011'],
'numero_ligne_debut': 96,
'numero_ligne_fin': 101},
{'date': datetime.date(2011, 9, 30),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('1274.00'),
'nom_compte': 'TVA collecté',
'numero_compte_credit': '',
'numero_compte_debit': '44571'},
{'credit': Decimal('33.66'),
'debit': Decimal('0.00'),
'nom_compte': 'TVA déductible sur autres biens et services',
'numero_compte_credit': '44566',
'numero_compte_debit': ''},
{'credit': Decimal('1240.00'),
'debit': Decimal('0.00'),
'nom_compte': 'TVA à décaisser',
'numero_compte_credit': '44551',
'numero_compte_debit': ''},
{'credit': Decimal('0.34'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits divers de gestion courante',
'numero_compte_credit': '758',
'numero_compte_debit': ''}],
'intitule': [' Solde des comptes de TVA du 01/03/2011 au 30/09/2011'],
'numero_ligne_debut': 104,
'numero_ligne_fin': 109},
{'date': datetime.date(2011, 10, 6),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('1240.00'),
'nom_compte': 'TVA à décaisser',
'numero_compte_credit': '',
'numero_compte_debit': '44551'},
{'credit': Decimal('1240.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Chèque WWWWWWW',
' Règlement de la TVA trimestrielle'],
'numero_ligne_debut': 112,
'numero_ligne_fin': 116},
{'date': datetime.date(2011, 11, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('5382.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '4111-CL1'},
{'credit': Decimal('4500.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('882.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Facture 2011-05 MyClient1',
' Prestation septembre 2011'],
'numero_ligne_debut': 119,
'numero_ligne_fin': 124},
{'date': datetime.date(2011, 11, 17),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('161.80'),
'nom_compte': 'Charges - cotisations URSSAF - Allocations familliales',
'numero_compte_credit': '',
'numero_compte_debit': '6411-URSF1'},
{'credit': Decimal('0.00'),
'debit': Decimal('153.31'),
'nom_compte': 'Charges - cotisations URSSAF - CSG/RDS déductible',
'numero_compte_credit': '',
'numero_compte_debit': '6411-URSF2'},
{'credit': Decimal('0.00'),
'debit': Decimal('86.89'),
'nom_compte': 'Charges - cotisations URSSAF - CSG/RDS non-déductible',
'numero_compte_credit': '',
'numero_compte_debit': '6411-URSF3'},
{'credit': Decimal('402.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Chèque ZZZZZZZ',
' Cotisation sociales Urssaf 4ème trimestre.'],
'numero_ligne_debut': 125,
'numero_ligne_fin': 131},
{'date': datetime.date(2011, 12, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('3.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Abonnement LCL Access'],
'numero_ligne_debut': 134,
'numero_ligne_fin': 137},
{'date': datetime.date(2011, 12, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('18538.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '',
'numero_compte_debit': '512'},
{'credit': Decimal('0.00'),
'debit': Decimal('2156.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '',
'numero_compte_debit': '44587'},
{'credit': Decimal('0.00'),
'debit': Decimal('882.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '',
'numero_compte_debit': '44587'},
{'credit': Decimal('18538.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '4111-CL1',
'numero_compte_debit': ''},
{'credit': Decimal('2156.00'),
'debit': Decimal('0.00'),
'nom_compte': 'T.V.A. Collectée',
'numero_compte_credit': '44571',
'numero_compte_debit': ''},
{'credit': Decimal('882.00'),
'debit': Decimal('0.00'),
'nom_compte': 'T.V.A. Collectée',
'numero_compte_credit': '44571',
'numero_compte_debit': ''}],
'intitule': [' Virement MyClient1 WWWWWWWWWW',
' Facture 2011-04, 2011-05'],
'numero_ligne_debut': 138,
'numero_ligne_fin': 146},
{'date': datetime.date(2011, 12, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('8372.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '4111-CL3'},
{'credit': Decimal('7000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('1372.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Facture 2011-06 MyClient3'],
'numero_ligne_debut': 147,
'numero_ligne_fin': 151},
{'date': datetime.date(2011, 12, 31),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3038.00'),
'nom_compte': 'TVA collecté',
'numero_compte_credit': '',
'numero_compte_debit': '44571'},
{'credit': Decimal('3038.00'),
'debit': Decimal('0.00'),
'nom_compte': 'TVA à décaisser',
'numero_compte_credit': '44551',
'numero_compte_debit': ''}],
'intitule': [' Solde des comptes de TVA du 01/10/2011 au 31/12/2011'],
'numero_ligne_debut': 154,
'numero_ligne_fin': 157},
{'date': datetime.date(2011, 12, 31),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('13156.00'),
'nom_compte': 'Clients - Factures à établir',
'numero_compte_credit': '',
'numero_compte_debit': '4181'},
{'credit': Decimal('11000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('2156.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Prestation MyClient1 décembre 2011'],
'numero_ligne_debut': 160,
'numero_ligne_fin': 164},
{'date': datetime.date(2012, 1, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('11000.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '706'},
{'credit': Decimal('0.00'),
'debit': Decimal('2156.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '',
'numero_compte_debit': '44587'},
{'credit': Decimal('13156.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Clients - Factures à établir',
'numero_compte_credit': '4181',
'numero_compte_debit': ''}],
'intitule': [' Prestation MyClient1 décembre 2011'],
'numero_ligne_debut': 169,
'numero_ligne_fin': 173},
{'date': datetime.date(2012, 1, 3),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('13156.00'),
'nom_compte': 'Clients - ventes de biens ou prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '4111-CL1'},
{'credit': Decimal('11000.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Produits - prestations de services',
'numero_compte_credit': '706',
'numero_compte_debit': ''},
{'credit': Decimal('2156.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Taxes sur le CA sur factures à établir',
'numero_compte_credit': '44587',
'numero_compte_debit': ''}],
'intitule': [' Facture 2012-01 MyClient1',
' Prestation décembre 2011'],
'numero_ligne_debut': 176,
'numero_ligne_fin': 181},
{'date': datetime.date(2012, 2, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('46.02'),
'nom_compte': 'Charges - Réceptions',
'numero_compte_credit': '',
'numero_compte_debit': '6257'},
{'credit': Decimal('0.00'),
'debit': Decimal('3.78'),
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'},
{'credit': Decimal('49.80'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [" Restaurant La Tour d'argent",
" Déjeuner d'affaire avec Steve Jobs",
' 0.88€ TVA 19.6% ; 2.90€ TVA 7.0%'],
'numero_ligne_debut': 184,
'numero_ligne_fin': 190}
])
def test_update_ecriture(self):
from colbert.livre_journal import update_ecriture
ecriture = {
'date': "12/11/2014",
'intitule': "Restaurant La Tour d'argent Déjeuner d'affaire avec Vladimir P.",
'ecritures': [
{'credit': '0.00',
'debit': '49.80',
'nom_compte': 'Charges - Réceptions',
'numero_compte_credit': '',
'numero_compte_debit': '6257'},
{'credit': '49.80',
'debit': '0.00',
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}
]
}
update_ecriture(ecriture, date="23/12/2014", montants=["33.40"])
self.assertEqual(ecriture, {
'date': "23/12/2014",
'intitule': "Restaurant La Tour d'argent Déjeuner d'affaire avec Vladimir P.",
'ecritures': [
{'credit': '0.00',
'debit': '33.40',
'nom_compte': 'Charges - Réceptions',
'numero_compte_credit': '',
'numero_compte_debit': '6257'},
{'credit': '33.40',
'debit': '0.00',
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}
]
})
# Avec plusieurs montants.
ecriture = {
'date': "12/11/2014",
'intitule': "Restaurant La Tour d'argent Déjeuner d'affaire avec Vladimir P.",
'ecritures': [
{'credit': '0.00',
'debit': '46.02',
'nom_compte': 'Charges - Réceptions',
'numero_compte_credit': '',
'numero_compte_debit': '6257'},
{'credit': '0.00',
'debit': '3.78',
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'},
{'credit': '49.80',
'debit': '0.00',
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}
]
}
update_ecriture(ecriture, date="23/12/2014", montants=["30.10", "3.30", "33.40"])
self.assertEqual(ecriture, {
'date': "23/12/2014",
'intitule': "Restaurant La Tour d'argent Déjeuner d'affaire avec Vladimir P.",
'ecritures': [
{'credit': '0.00',
'debit': '30.10',
'nom_compte': 'Charges - Réceptions',
'numero_compte_credit': '',
'numero_compte_debit': '6257'},
{'credit': '0.00',
'debit': '3.30',
'nom_compte': 'T.V.A. déductible sur autres biens et services',
'numero_compte_credit': '',
'numero_compte_debit': '44566'},
{'credit': '33.40',
'debit': '0.00',
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}
]
})
def test_rechercher_ecriture(self):
from colbert.livre_journal import rechercher_ecriture
livre_journal = codecs.open(os.path.join(CURRENT_DIR, "livre-journal.txt"),
mode="r", encoding="utf-8")
livre_journal_list = livre_journal_to_list(livre_journal)
self.assertEqual(list(rechercher_ecriture("lcl", livre_journal_list)), [
{'date': datetime.date(2011, 4, 2),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('1500.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '',
'numero_compte_debit': '512'},
{'credit': Decimal('1500.00'),
'debit': Decimal('0.00'),
'nom_compte': "Capital et compte de l'exploitant",
'numero_compte_credit': '100',
'numero_compte_debit': ''}],
'intitule': [' Capital initial',
' Dépôt de 1500 € par Stanislas Guerra',
' au LCL Ledru Rollin'],
'numero_ligne_debut': 35,
'numero_ligne_fin': 40},
{'date': datetime.date(2011, 4, 28),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('15.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('15.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Cotisation Option PRO LCL'],
'numero_ligne_debut': 47,
'numero_ligne_fin': 50},
{'date': datetime.date(2011, 5, 2),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('3.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Abonnement LCL Access'],
'numero_ligne_debut': 53,
'numero_ligne_fin': 56},
{'date': datetime.date(2011, 9, 3),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('3.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Abonnement LCL Access'],
'numero_ligne_debut': 92,
'numero_ligne_fin': 95},
{'date': datetime.date(2011, 12, 1),
'ecritures': [{'credit': Decimal('0.00'),
'debit': Decimal('3.00'),
'nom_compte': 'Autres frais de commission sur prestations de services',
'numero_compte_credit': '',
'numero_compte_debit': '6278-LCL'},
{'credit': Decimal('3.00'),
'debit': Decimal('0.00'),
'nom_compte': 'Banques',
'numero_compte_credit': '512',
'numero_compte_debit': ''}],
'intitule': [' Abonnement LCL Access'],
'numero_ligne_debut': 134,
'numero_ligne_fin': 137}
])
def suite():
    """Build and return the unittest suite for LivreJournalTestCase."""
    return unittest.TestLoader().loadTestsFromTestCase(LivreJournalTestCase)
| 55.911877
| 158
| 0.41155
| 4,796
| 58,372
| 4.821518
| 0.08236
| 0.128697
| 0.096523
| 0.063311
| 0.830998
| 0.792856
| 0.768509
| 0.724875
| 0.703684
| 0.667142
| 0
| 0.087129
| 0.445916
| 58,372
| 1,043
| 159
| 55.965484
| 0.627709
| 0.003324
| 0
| 0.674112
| 0
| 0.003046
| 0.373479
| 0.001084
| 0
| 0
| 0
| 0.000959
| 0.017259
| 1
| 0.009137
| false
| 0
| 0.017259
| 0
| 0.028426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
014f40ddff30525556d2ec687b6f4e7d387eca21
| 733
|
py
|
Python
|
dFL/Utils/config.py
|
a-dirir/decentralized_FL
|
5a2e75d02ec77f7bb0f124d2498e0087b1bc1f0e
|
[
"MIT"
] | null | null | null |
dFL/Utils/config.py
|
a-dirir/decentralized_FL
|
5a2e75d02ec77f7bb0f124d2498e0087b1bc1f0e
|
[
"MIT"
] | null | null | null |
dFL/Utils/config.py
|
a-dirir/decentralized_FL
|
5a2e75d02ec77f7bb0f124d2498e0087b1bc1f0e
|
[
"MIT"
] | null | null | null |
# Global application settings for the decentralized federated-learning app.
config = {
    # Local filesystem root for application data.
    # NOTE(review): hard-coded Windows path -- presumably adjusted per
    # deployment; confirm before running on other hosts.
    "root_directory": "D:\\dFL",
    # Coordinating server: address plus its hex-encoded PEM public keys
    # (one for encryption, one for signature verification).
    "main_server": {
        "ip": "127.0.0.1",
        "port": 5000,
        "url": "http://127.0.0.1:5000",
        "encryption_key": "2d2d2d2d2d424547494e205055424c4943204b45592d2d2d2d2d0a4d436f77425159444b325675417945413531744d7137345949476543742b5a5059554b6d364e526f7a697470477467576f564c6d662f694d4d6a413d0a2d2d2d2d2d454e44205055424c4943204b45592d2d2d2d2d0a",
        "signature_key": "2d2d2d2d2d424547494e205055424c4943204b45592d2d2d2d2d0a4d436f77425159444b32567741794541494b52736d323769723269384d7251696573364173554734646b323657473158536f4354312b39325742553d0a2d2d2d2d2d454e44205055424c4943204b45592d2d2d2d2d0a"
    },
    # MongoDB connection settings (default local port 27017 -- TODO confirm
    # the backend is MongoDB; not shown here).
    "database": {
        "ip": "127.0.0.1",
        "port": 27017,
        "name": "dFL"
    }
}
| 38.578947
| 251
| 0.789905
| 37
| 733
| 15.540541
| 0.621622
| 0.02087
| 0.026087
| 0.031304
| 0.041739
| 0.041739
| 0
| 0
| 0
| 0
| 0
| 0.60274
| 0.103683
| 733
| 18
| 252
| 40.722222
| 0.272451
| 0
| 0
| 0.133333
| 0
| 0
| 0.794521
| 0.619178
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0184e6dfc8d5507b4ffde3cb1c395e87919bf667
| 139
|
py
|
Python
|
other/ipypublish/filters/replace_string.py
|
KGerring/metaproj
|
e957de611f5268978df10184e4cedbd229ef617a
|
[
"MIT"
] | 2
|
2021-04-11T01:43:09.000Z
|
2021-07-08T00:17:57.000Z
|
other/ipypublish/filters/replace_string.py
|
KGerring/metaproj
|
e957de611f5268978df10184e4cedbd229ef617a
|
[
"MIT"
] | 1
|
2021-08-21T23:39:26.000Z
|
2021-08-21T23:39:26.000Z
|
other/ipypublish/filters/replace_string.py
|
KGerring/metaproj
|
e957de611f5268978df10184e4cedbd229ef617a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import annotations
def replace_string(line, find, replace):
    """Return a copy of *line* with every occurrence of *find* substituted by *replace*."""
    substituted = line.replace(find, replace)
    return substituted
| 19.857143
| 40
| 0.76259
| 19
| 139
| 5.315789
| 0.736842
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136691
| 139
| 6
| 41
| 23.166667
| 0.841667
| 0.143885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
018b710f7ebec73a1adb46f00adf4743bd6b0cc1
| 4,613
|
py
|
Python
|
mcmctools/pytorch/data_generation/batchconfigdatagenerator.py
|
statphysandml/MCMCEvaluationLib
|
f722b2c7df88b1b33cd29335a22eef53bdad9665
|
[
"MIT"
] | 2
|
2021-06-04T04:52:04.000Z
|
2021-06-04T19:32:58.000Z
|
mcmctools/pytorch/data_generation/batchconfigdatagenerator.py
|
statphysandml/MCMCEvaluationLib
|
f722b2c7df88b1b33cd29335a22eef53bdad9665
|
[
"MIT"
] | null | null | null |
mcmctools/pytorch/data_generation/batchconfigdatagenerator.py
|
statphysandml/MCMCEvaluationLib
|
f722b2c7df88b1b33cd29335a22eef53bdad9665
|
[
"MIT"
] | null | null | null |
from mcmctools.pytorch.data_generation.configdatagenerator import ConfigDataGenerator
# Avoids that samples are loaded one by another from the self.data dataframe - instead, a batch is extracted directly
# This leads to a performance boost since the underlying data frame is accessed via slicing
# i.e: batch = self.data.iloc[i:i+batch_size].values instead of
# : batch = np.stack([self.data.iloc[j] for j in range(i, i+batch_size])
class BatchConfigDataGenerator(ConfigDataGenerator):
    """Config data generator that returns whole batches at once.

    Instead of fetching samples one by one, it slices the underlying
    dataframe (``self.data.iloc[i:i+batch_size]``), which the module
    comment identifies as the performance motivation.  When a batch
    crosses a chunk boundary, the remainder is taken from the next chunk
    loaded via ``get_next_chunk_collection``.
    """

    def __init__(self, **kwargs):
        # NOTE(review): kwargs is forwarded to the parent *before*
        # 'batch_size' is popped, so the parent also receives it --
        # presumably harmless since it takes **kwargs; confirm against
        # ConfigDataGenerator.__init__.
        super().__init__(**kwargs)
        self.batch_size = kwargs.pop('batch_size')

    def sample_target_config(self):
        """Return (batch, target) where batch holds label columns and target holds "Config" columns."""
        if self.iterator >= len(self.data):
            self.iterator = 0  # Reset iterator
            self.data = self.get_next_chunk_collection(resample=True)  # load data
            # Needs to be set again if get_next_chunk_collection is called here for the first time
            self.determine_target_and_input_size()
        self.iterator += self.batch_size
        if self.iterator > len(self.data):
            # Batch overruns the current chunk: take what is left first.
            batch, target = list(self.data[self.labels].iloc[self.iterator - self.batch_size:len(self.data)].values.reshape((-1, self.inp_size))), \
                list(self.data["Config"].iloc[self.iterator - self.batch_size:len(self.data)].values.reshape((-1, self.tar_size)))
            if self.chunk_iterator < self.total_chunks:
                # Load next chunk and reset iterator
                n_missing_configs = self.iterator - len(self.data)
                self.iterator = n_missing_configs  # Reset iterator
                self.data = self.get_next_chunk_collection(resample=True)  # load data
                # Top up the partial batch from the freshly loaded chunk.
                batch += list(self.data[self.labels].iloc[0:n_missing_configs].values.reshape((-1, self.inp_size)))
                target += list(self.data["Config"].iloc[0:n_missing_configs].values.reshape((-1, self.tar_size)))
                return batch, target
            else:
                # End of files has been reached
                # Prepare next data iteration
                self.iterator = 0  # Reset iterator
                self.data = self.get_next_chunk_collection(resample=True)  # load data
                # Return last samples of previous data iteration
                return batch, target
        else:
            # Whole batch fits into the current chunk: plain slice.
            return list(self.data[self.labels].iloc[self.iterator - self.batch_size:self.iterator].values.reshape((-1, self.inp_size))), \
                list(self.data["Config"].iloc[self.iterator - self.batch_size:self.iterator].values.reshape((-1, self.tar_size)))

    def sample_target_param(self):
        """Mirror of sample_target_config with roles swapped: batch holds "Config" columns, target holds label columns.

        NOTE(review): the reset guard here is ``==`` while
        sample_target_config uses ``>=`` -- confirm whether the asymmetry
        is intentional.
        """
        if self.iterator == len(self.data):
            # Load next chunk and reset iterator
            self.iterator = 0  # Reset iterator
            self.data = self.get_next_chunk_collection(resample=True)  # load data
            # Needs to be set again if get_next_chunk_collection is called here for the first time
            self.determine_target_and_input_size()
        self.iterator += self.batch_size
        if self.iterator > len(self.data):
            # Batch overruns the current chunk: take what is left first.
            batch, target = list(self.data["Config"].iloc[self.iterator - self.batch_size:len(self.data)].values.reshape(-1, self.inp_size)), \
                list(self.data[self.labels].iloc[self.iterator - self.batch_size:len(self.data)].values.reshape((-1, self.tar_size)))
            if self.chunk_iterator < self.total_chunks:
                # Load next chunk and reset iterator
                n_missing_configs = self.iterator - len(self.data)
                self.iterator = n_missing_configs  # Reset iterator
                self.data = self.get_next_chunk_collection(resample=True)  # load data
                # Top up the partial batch from the freshly loaded chunk.
                batch += list(self.data["Config"].iloc[0:n_missing_configs].values.reshape(-1, self.inp_size))
                target += list(self.data[self.labels].iloc[0:n_missing_configs].values.reshape((-1, self.tar_size)))
                return batch, target
            else:
                # End of files has been reached
                # Prepare next data iteration
                self.iterator = 0  # Reset iterator
                self.data = self.get_next_chunk_collection(resample=True)  # load data
                # Return last samples of previous data iteration
                return batch, target
        else:
            # Whole batch fits into the current chunk: plain slice.
            return list(self.data["Config"].iloc[self.iterator - self.batch_size:self.iterator].values.reshape(-1, self.inp_size)), \
                list(self.data[self.labels].iloc[self.iterator - self.batch_size:self.iterator].values.reshape((-1, self.tar_size)))
| 58.392405
| 149
| 0.636679
| 599
| 4,613
| 4.751252
| 0.171953
| 0.08714
| 0.063247
| 0.075896
| 0.827477
| 0.827477
| 0.817287
| 0.796908
| 0.796908
| 0.796908
| 0
| 0.005836
| 0.2571
| 4,613
| 79
| 150
| 58.392405
| 0.824628
| 0.211576
| 0
| 0.588235
| 0
| 0
| 0.012746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.019608
| 0
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6df2506f3625ea63c6bc971197b8a1fac0029dca
| 239
|
py
|
Python
|
expfactory/database/__init__.py
|
YanivD/expfactory
|
a34ba21016ef01a44998764935be20ec99fdd0a8
|
[
"BSD-3-Clause"
] | 26
|
2016-09-02T22:25:39.000Z
|
2021-02-03T16:09:33.000Z
|
expfactory/database/__init__.py
|
YanivD/expfactory
|
a34ba21016ef01a44998764935be20ec99fdd0a8
|
[
"BSD-3-Clause"
] | 157
|
2016-08-09T20:17:58.000Z
|
2022-03-23T21:20:01.000Z
|
expfactory/database/__init__.py
|
YanivD/expfactory
|
a34ba21016ef01a44998764935be20ec99fdd0a8
|
[
"BSD-3-Clause"
] | 14
|
2016-09-02T22:25:42.000Z
|
2022-03-04T11:40:48.000Z
|
# Select the storage backend at import time from the configured
# EXPFACTORY_DATABASE string.
from expfactory.defaults import EXPFACTORY_DATABASE

if EXPFACTORY_DATABASE == "filesystem":
    # Flat-file backend.
    from .filesystem import *
else:
    # Everything else goes through the relational backend.
    from .relational import *
    if EXPFACTORY_DATABASE.startswith("sqlite"):
        # NOTE(review): sqlite names are imported after relational, so
        # they shadow relational names where the two overlap -- presumably
        # intentional; confirm against the sqlite module.
        from .sqlite import *
| 23.9
| 51
| 0.732218
| 25
| 239
| 6.88
| 0.44
| 0.313953
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188285
| 239
| 9
| 52
| 26.555556
| 0.886598
| 0
| 0
| 0
| 0
| 0
| 0.066946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0970bdaa09faf6aac7a2ee9f8663253eaca5bc6c
| 49
|
py
|
Python
|
question28.py
|
gusenov/test-tech-mail-ru-python2
|
70e37a3de447b6f7c4da5add75f65df1b51405fe
|
[
"MIT"
] | null | null | null |
question28.py
|
gusenov/test-tech-mail-ru-python2
|
70e37a3de447b6f7c4da5add75f65df1b51405fe
|
[
"MIT"
] | null | null | null |
question28.py
|
gusenov/test-tech-mail-ru-python2
|
70e37a3de447b6f7c4da5add75f65df1b51405fe
|
[
"MIT"
] | null | null | null |
# Quiz: strings are immutable -- rstrip() returns a *new* string and the
# result is discarded on the next line, so `s` keeps its newlines.
s = "\nAlice\n"
s.rstrip()
print s  # \nAlice\n
| 9.8
| 20
| 0.571429
| 9
| 49
| 3.111111
| 0.555556
| 0.5
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 49
| 4
| 21
| 12.25
| 0.717949
| 0.183673
| 0
| 0
| 0
| 0
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
09b5410337055cb5d9ae000a32984d3ed5535dd4
| 113
|
py
|
Python
|
fixtures/__init__.py
|
daryasary/instagram-graph-api
|
8a3fdf0b1bfb15d198da9889ac0bfe747f8bc019
|
[
"Apache-2.0"
] | 11
|
2019-03-05T18:41:33.000Z
|
2020-10-16T13:54:06.000Z
|
fixtures/__init__.py
|
daryasary/instagram-graph-api
|
8a3fdf0b1bfb15d198da9889ac0bfe747f8bc019
|
[
"Apache-2.0"
] | 1
|
2019-05-30T11:52:56.000Z
|
2019-05-31T19:51:14.000Z
|
fixtures/__init__.py
|
daryasary/instagram-graph-api
|
8a3fdf0b1bfb15d198da9889ac0bfe747f8bc019
|
[
"Apache-2.0"
] | 6
|
2019-04-30T10:23:46.000Z
|
2020-05-12T17:26:53.000Z
|
# Prefer developer-local overrides; fall back to the shipped defaults
# when the optional local_variables module is absent.
try:
    from fixtures.local_variables import *
except ImportError:
    from fixtures.default_variables import *
| 22.6
| 44
| 0.778761
| 13
| 113
| 6.615385
| 0.692308
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168142
| 113
| 4
| 45
| 28.25
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
09c9406be5d4b2e3aa21d4a7eb381b39d7a5a284
| 24,741
|
py
|
Python
|
examples/SendSales/tax_code_list.py
|
avasachinbaijal/AvaTax-REST-V2-Python-SDK
|
b6e12550fa11b08cd8f57195c41d9b31000553de
|
[
"Apache-2.0"
] | 13
|
2018-04-13T07:07:24.000Z
|
2021-05-06T21:08:03.000Z
|
examples/SendSales/tax_code_list.py
|
avasachinbaijal/AvaTax-REST-V2-Python-SDK
|
b6e12550fa11b08cd8f57195c41d9b31000553de
|
[
"Apache-2.0"
] | 22
|
2018-03-21T18:44:20.000Z
|
2021-06-11T18:42:16.000Z
|
examples/SendSales/tax_code_list.py
|
avasachinbaijal/AvaTax-REST-V2-Python-SDK
|
b6e12550fa11b08cd8f57195c41d9b31000553de
|
[
"Apache-2.0"
] | 27
|
2017-12-27T21:21:00.000Z
|
2022-03-29T17:00:51.000Z
|
"""Hold tax code list."""
tax_codes = ["D0000000", "D9999999", "DA010000", "DA030000", "DA040000", "DA040100", "DA051011", "DA051012", "DA051013", "DA059399", "DB010000", "DB020000", "DB031013", "DB031014", "DB031015", "DB031016", "DC010000", "DC010100", "DC010200", "DC010300", "DC010400", "DC010500", "DC010600", "DC011000", "DC020000", "DC020100", "DC020200", "DC020300", "DC020400", "DC020402", "DC020500", "DC020501", "DC020502", "DC020600", "DC060000", "DC070000", "DD020000", "DD040000", "DG010000", "DG010100", "DG010200", "DG010201", "DG010300", "DG010301", "DG010302", "DG020000", "DI010000", "DI010100", "DI010200", "DI010201", "DL020000", "DM010100", "DM020000", "DM020100", "DM020200", "DM030000", "DM030100", "DM030200", "DM030201", "DM040000", "DM040100", "DM040200", "DM040201", "DN010000", "DO010000", "DP010000", "DP010100", "DP010200", "DP010201", "DV010000", "DV010100", "DV010200", "DV010201", "DV017194", "DV021007", "DV021008", "DV021009", "DV021010", "DV029398", "FR000000", "FR010000", "FR010100", "FR010200", "FR020000", "FR020100", "FR020200", "FR020400", "FR020500", "FR020800", "FR020900", "FR021004", "FR022000", "FR030000", "FR030700", "FR040000", "FR059314", "FR060000", "FR070100", "FR999999", "O0000000", "O9999999", "OA020000", "OA020100", "OA020200", "OA020300", "OA020400", "OA020500", "OA020600", "OA020700", "OA026346", "OA029338", "OC030000", "OC040000", "OC040100", "OC040200", "OD010000", "OD020000", "OD020400", "OD020500", "OD030000", "OE010100", "OF020000", "OF030000", "OF040002", "OF040003", "OF040004", "OH010000", "OM010000", "ON010000", "ON030000", "OO028842", "OR040000", "OR070000", "OT010100", "OT010300", "OT010400", "P0000000", "P000000H", "P000100H", "P9999999", "PA020003", "PA020100", "PA020111", "PA020113", "PA020659", "PA020661", "PA020662", "PA020664", "PA020665", "PA020668", "PA020738", "PA021078", "PA028802", "PA028858", "PA029612", "PA029613", "PA029614", "PA100000", "PA200522", "PA200546", "PA300741", "PB100000", "PB100200", "PB100300", "PB100400", 
"PB100817", "PB100818", "PB100819", "PB200742", "PB200743", "PB308786", "PC002386", "PC010000", "PC020000", "PC030000", "PC030100", "PC030101", "PC030103", "PC030105", "PC030106", "PC030108", "PC030109", "PC030110", "PC030111", "PC030113", "PC030115", "PC030116", "PC030117", "PC030118", "PC030119", "PC030120", "PC030121", "PC030122", "PC030123", "PC030124", "PC030125", "PC030126", "PC030127", "PC030128", "PC030129", "PC030130", "PC030131", "PC030133", "PC030134", "PC030135", "PC030137", "PC030138", "PC030139", "PC030140", "PC030142", "PC030143", "PC030144", "PC030145", "PC030146", "PC030147", "PC030148", "PC030150", "PC030151", "PC030152", "PC030153", "PC030155", "PC030156", "PC030157", "PC030158", "PC030168", "PC030200", "PC030201", "PC030202", "PC030203", "PC030204", "PC030205", "PC030206", "PC030207", "PC030208", "PC030209", "PC030210", "PC030300", "PC030301", "PC030302", "PC030303", "PC030304", "PC030305", "PC030306", "PC030307", "PC030308", "PC030309", "PC030310", "PC030311", "PC030312", "PC030313", "PC030314", "PC030315", "PC030316", "PC030400", "PC030401", "PC030402", "PC030403", "PC030404", "PC030405", "PC030406", "PC030407", "PC030408", "PC030409", "PC030410", "PC030411", "PC030412", "PC030413", "PC030414", "PC030415", "PC030500", "PC030600", "PC030601", "PC030602", "PC030605", "PC031034", "PC031042", "PC031050", "PC031098", "PC031114", "PC031138", "PC031146", "PC031210", "PC031226", "PC031234", "PC031242", "PC039328", "PC040000", "PC040065", "PC040100", "PC040101", "PC040103", "PC040105", "PC040106", "PC040108", "PC040109", "PC040110", "PC040111", "PC040113", "PC040115", "PC040116", "PC040117", "PC040118", "PC040119", "PC040120", "PC040121", "PC040122", "PC040123", "PC040124", "PC040125", "PC040126", "PC040127", "PC040128", "PC040129", "PC040130", "PC040131", "PC040132", "PC040133", "PC040134", "PC040135", "PC040137", "PC040138", "PC040139", "PC040140", "PC040142", "PC040143", "PC040144", "PC040145", "PC040146", "PC040147", "PC040148", "PC040150", 
"PC040151", "PC040152", "PC040153", "PC040155", "PC040156", "PC040157", "PC040158", "PC040168", "PC040200", "PC040201", "PC040202", "PC040203", "PC040204", "PC040205", "PC040206", "PC040207", "PC040208", "PC040209", "PC040210", "PC040300", "PC040301", "PC040302", "PC040303", "PC040304", "PC040305", "PC040306", "PC040307", "PC040308", "PC040309", "PC040310", "PC040311", "PC040312", "PC040313", "PC040314", "PC040315", "PC040316", "PC040400", "PC040401", "PC040402", "PC040403", "PC040404", "PC040405", "PC040406", "PC040407", "PC040408", "PC040409", "PC040410", "PC040411", "PC040412", "PC040413", "PC040414", "PC040415", "PC040500", "PC040501", "PC040502", "PC040503", "PC040600", "PC040601", "PC040602", "PC040605", "PC041034", "PC041042", "PC041050", "PC041098", "PC041114", "PC041138", "PC041146", "PC041210", "PC041226", "PC041234", "PC041242", "PC049328", "PC060000", "PC070000", "PC070100", "PC070200", "PC070300", "PC070400", "PC070500", "PC070600", "PC070601", "PC077426", "PC078954", "PC080000", "PC080100", "PC080200", "PC080300", "PC080400", "PC080500", "PC080600", "PC080601", "PC087426", "PC088954", "PC098212", "PC098213", "PC100000", "PC168330", "PC168346", "PC168354", "PD010000", "PD071521", "PD078658", "PD078666", "PD078690", "PD078738", "PD078746", "PE020100", "PE070000", "PE070200", "PE070201", "PE070202", "PE070203", "PE070204", "PE070205", "PE070206", "PE070209", "PE080000", "PE080100", "PE080101", "PE080200", "PE080201", "PE080772", "PE089124", "PE200706", "PE308914", "PF012394", "PF030011", "PF030746", "PF040100", "PF040723", "PF040724", "PF040726", "PF040727", "PF040728", "PF040729", "PF040730", "PF040731", "PF040732", "PF040733", "PF040739", "PF041200", "PF049391", "PF050001", "PF050002", "PF050012", "PF050032", "PF050062", "PF050064", "PF050067", "PF050068", "PF050069", "PF050070", "PF050071", "PF050072", "PF050073", "PF050075", "PF050076", "PF050077", "PF050078", "PF050079", "PF050080", "PF050081", "PF050082", "PF050083", "PF050084", "PF050085", 
"PF050086", "PF050087", "PF050088", "PF050089", "PF050091", "PF050092", "PF050093", "PF050094", "PF050095", "PF050096", "PF050097", "PF050099", "PF050100", "PF050101", "PF050102", "PF050103", "PF050104", "PF050105", "PF050106", "PF050107", "PF050108", "PF050109", "PF050110", "PF050111", "PF050112", "PF050113", "PF050114", "PF050115", "PF050117", "PF050118", "PF050119", "PF050120", "PF050121", "PF050122", "PF050123", "PF050124", "PF050125", "PF050126", "PF050200", "PF050201", "PF050202", "PF050203", "PF050204", "PF050205", "PF050206", "PF050208", "PF05020H", "PF050211", "PF050214", "PF050215", "PF050216", "PF050218", "PF050219", "PF050220", "PF050221", "PF050222", "PF050224", "PF050226", "PF050227", "PF050232", "PF050233", "PF050236", "PF050237", "PF050300", "PF050301", "PF050302", "PF050303", "PF050304", "PF050305", "PF050306", "PF050307", "PF050309", "PF050311", "PF050313", "PF050314", "PF050323", "PF050401", "PF050402", "PF050403", "PF050404", "PF050405", "PF050406", "PF050407", "PF050408", "PF050409", "PF050410", "PF050411", "PF050412", "PF050413", "PF050414", "PF050415", "PF050416", "PF050417", "PF050500", "PF050501", "PF050502", "PF050503", "PF050507", "PF050600", "PF050612", "PF050613", "PF050654", "PF050655", "PF050667", "PF050669", "PF050670", "PF050671", "PF050672", "PF050673", "PF050674", "PF050675", "PF050676", "PF050677", "PF050685", "PF050686", "PF050687", "PF050700", "PF050701", "PF050702", "PF050707", "PF050709", "PF050711", "PF050712", "PF050713", "PF050714", "PF050715", "PF050716", "PF050717", "PF050718", "PF050720", "PF050721", "PF050722", "PF050801", "PF050802", "PF050806", "PF050807", "PF050900", "PF050901", "PF050902", "PF050903", "PF050904", "PF050905", "PF050908", "PF050909", "PF051000", "PF051004", "PF051100", "PF051101", "PF051102", "PF051103", "PF051104", "PF051105", "PF051338", "PF051346", "PF051354", "PF051370", "PF051378", "PF051410", "PF051418", "PF051419", "PF051426", "PF051427", "PF051428", "PF051434", "PF051450", "PF051458", 
"PF051459", "PF051474", "PF051475", "PF051490", "PF051498", "PF051506", "PF051507", "PF051514", "PF051522", "PF051530", "PF051531", "PF051538", "PF051546", "PF051554", "PF051562", "PF051570", "PF051571", "PF051578", "PF051579", "PF051586", "PF051594", "PF051690", "PF051698", "PF051706", "PF051714", "PF051722", "PF051730", "PF051778", "PF051786", "PF051794", "PF051802", "PF051810", "PF051811", "PF051818", "PF051826", "PF051834", "PF051842", "PF051850", "PF051858", "PF051866", "PF051874", "PF051882", "PF051890", "PF051898", "PF051906", "PF051914", "PF051930", "PF051946", "PF051954", "PF051962", "PF051970", "PF051971", "PF051977", "PF051978", "PF051986", "PF051994", "PF052002", "PF052018", "PF052025", "PF052026", "PF052034", "PF052050", "PF052058", "PF052066", "PF052074", "PF052082", "PF052090", "PF052098", "PF052106", "PF052114", "PF052122", "PF052130", "PF052131", "PF052132", "PF052133", "PF052138", "PF052139", "PF052140", "PF052141", "PF052142", "PF052143", "PF052144", "PF052145", "PF052146", "PF052147", "PF052154", "PF052162", "PF052167", "PF052168", "PF052169", "PF052170", "PF052178", "PF052202", "PF052210", "PF052234", "PF052242", "PF052250", "PF052322", "PF055195", "PF058754", "PF058762", "PF058770", "PF059392", "PF090123", "PF0F0215", "PF101000", "PF110000", "PF110300", "PF110301", "PF110302", "PF110303", "PF110304", "PF110305", "PF110400", "PF110401", "PF110402", "PF110403", "PF110404", "PF110405", "PF110700", "PF110701", "PF110702", "PF110703", "PF110704", "PF110705", "PF110800", "PF110801", "PF110802", "PF110803", "PF110804", "PF110805", "PF120000", "PF120012", "PF120101", "PF120603", "PF120900", "PF160014", "PF160015", "PF160016", "PF160017", "PF160019", "PF160020", "PF160021", "PF160022", "PF160023", "PF160024", "PF160025", "PF160026", "PF160027", "PF160030", "PF160036", "PF160039", "PF160040", "PF160041", "PF160042", "PF160043", "PF160045", "PF160046", "PF160047", "PF160048", "PF160049", "PF160050", "PF160051", "PF160052", "PF160053", "PF160054", 
"PF160055", "PF160056", "PF160057", "PF160058", "PF160059", "PF170000", "PF190677", "PF199605", "PF199606", "PF199607", "PF199608", "PF199609", "PF199610", "PF199611", "PF220744", "PF220745", "PF232402", "PG050000", "PG050715", "PG050716", "PG050717", "PG068810", "PG076546", "PG081610", "PG081618", "PH000794", "PH000802", "PH050000", "PH050051", "PH050100", "PH050101", "PH050102", "PH050103", "PH050104", "PH050105", "PH050106", "PH050107", "PH050108", "PH050109", "PH050110", "PH050111", "PH050112", "PH050113", "PH050114", "PH050115", "PH050116", "PH050117", "PH050118", "PH050122", "PH050124", "PH050125", "PH050126", "PH050127", "PH050140", "PH050141", "PH050142", "PH050143", "PH050144", "PH050145", "PH050146", "PH050147", "PH050148", "PH050149", "PH050150", "PH050151", "PH050152", "PH050153", "PH050154", "PH050155", "PH050200", "PH050201", "PH050202", "PH050207", "PH050208", "PH050213", "PH050214", "PH050215", "PH050216", "PH050217", "PH050218", "PH050219", "PH050220", "PH050221", "PH050222", "PH050223", "PH050224", "PH050225", "PH050226", "PH050227", "PH050228", "PH050229", "PH050230", "PH050231", "PH050232", "PH050233", "PH050234", "PH050235", "PH050236", "PH050237", "PH050238", "PH050239", "PH050240", "PH050241", "PH050242", "PH050243", "PH050244", "PH050245", "PH050246", "PH050247", "PH050300", "PH050301", "PH050302", "PH050310", "PH050311", "PH050312", "PH050313", "PH050400", "PH050401", "PH050402", "PH050407", "PH050408", "PH050413", "PH050414", "PH050419", "PH050420", "PH050425", "PH050426", "PH050440", "PH050441", "PH050442", "PH050443", "PH050444", "PH050445", "PH050446", "PH050447", "PH050448", "PH050449", "PH050450", "PH050451", "PH050452", "PH050453", "PH050454", "PH050455", "PH050456", "PH050457", "PH050458", "PH050459", "PH050460", "PH050461", "PH050462", "PH050463", "PH050464", "PH050465", "PH050500", "PH050501", "PH050507", "PH050508", "PH050509", "PH050511", "PH050514", "PH050526", "PH050527", "PH050528", "PH050529", "PH050530", "PH050531", 
"PH050532", "PH050533", "PH050600", "PH050601", "PH050623", "PH050639", "PH050676", "PH050677", "PH050700", "PH050709", "PH050720", "PH050730", "PH050731", "PH050732", "PH050733", "PH050859", "PH050860", "PH058922", "PH060754", "PH060762", "PH060770", "PH060771", "PH101010", "PH101011", "PH101012", "PH101013", "PH101014", "PH101015", "PH101016", "PH101017", "PH101018", "PH101020", "PH101030", "PH101040", "PH101050", "PH101060", "PH101070", "PH101080", "PH101090", "PH101100", "PH101110", "PH101120", "PH101130", "PH101140", "PH108866", "PH150101", "PH150102", "PH150103", "PH150104", "PH150105", "PH150106", "PH150107", "PH150108", "PH150111", "PH150112", "PH150113", "PH150114", "PH150115", "PH150116", "PH150117", "PH150118", "PH150121", "PH150122", "PH150123", "PH150124", "PH150125", "PH150126", "PH150127", "PH150128", "PH150131", "PH150132", "PH150133", "PH150134", "PH150135", "PH150136", "PH150137", "PH150138", "PH150141", "PH150142", "PH150143", "PH150144", "PH150145", "PH150146", "PH150147", "PH150148", "PH400004", "PH400005", "PH400007", "PH400008", "PH400009", "PH400014", "PH400031", "PH400033", "PH400035", "PH400036", "PH400038", "PH400044", "PH400167", "PH400652", "PH400653", "PH400656", "PH400778", "PH400787", "PH400788", "PH400789", "PH400826", "PH400827", "PH400834", "PH400842", "PH400850", "PH400874", "PH400882", "PH400898", "PH400899", "PH400906", "PH400914", "PH400922", "PH400923", "PH400924", "PH400930", "PH400938", "PH400946", "PH400954", "PH400955", "PH400956", "PH400957", "PH400958", "PH400962", "PH400963", "PH400970", "PH400972", "PH400986", "PH400994", "PH401002", "PH401010", "PH401011", "PH401018", "PH401019", "PH401020", "PH401021", "PH401022", "PH401023", "PH402810", "PH402818", "PH402826", "PH402834", "PH402842", "PH402850", "PH402858", "PH402874", "PH402882", "PH402891", "PH402898", "PH402906", "PH402914", "PH402922", "PH402923", "PH402930", "PH402938", "PH402946", "PH402954", "PH402962", "PH402970", "PH402978", "PH402986", "PH402994", 
"PH403002", "PH403003", "PH403004", "PH403010", "PH403018", "PH403026", "PH403034", "PH403042", "PH403050", "PH403058", "PH403066", "PH403067", "PH403074", "PH403082", "PH403090", "PH403098", "PH403106", "PH403114", "PH403122", "PH403130", "PH403138", "PH403210", "PH403218", "PH403234", "PH403242", "PH403250", "PH403274", "PH403282", "PH403290", "PH403298", "PH403306", "PH403314", "PH403322", "PH403330", "PH403338", "PH403346", "PH403362", "PH403370", "PH403378", "PH403386", "PH403394", "PH403402", "PH403410", "PH403418", "PH403426", "PH403434", "PH403442", "PH403450", "PH403458", "PH403466", "PH403474", "PH403482", "PH403490", "PH403498", "PH403506", "PH403522", "PH403530", "PH403538", "PH403546", "PH403554", "PH403562", "PH403570", "PH403578", "PH403586", "PH403594", "PH403602", "PH403610", "PH403618", "PH403626", "PH403642", "PH403650", "PH403658", "PH403666", "PH403674", "PH403682", "PH403683", "PH403684", "PH403690", "PH403691", "PH403698", "PH403699", "PH403700", "PH403706", "PH403714", "PH403722", "PH403730", "PH403738", "PH403746", "PH403754", "PH403762", "PH403770", "PH403778", "PH403786", "PH403794", "PH403802", "PH403810", "PH403818", "PH403826", "PH403834", "PH403842", "PH403850", "PH403858", "PH403866", "PH403867", "PH403874", "PH403882", "PH403883", "PH403890", "PH403898", "PH403906", "PH403914", "PH403915", "PH403916", "PH403917", "PH403922", "PH403930", "PH403938", "PH403939", "PH403940", "PH403946", "PH403954", "PH403962", "PH403970", "PH403978", "PH403979", "PH403980", "PH403981", "PH403982", "PH403986", "PH403994", "PH404002", "PH404018", "PH404026", "PH404034", "PH404042", "PH404050", "PH404058", "PH404066", "PH404074", "PH404082", "PH404090", "PH404098", "PH404106", "PH404114", "PH404115", "PH404122", "PH404123", "PH404124", "PH404130", "PH404131", "PH404138", "PH404146", "PH404154", "PH404162", "PH404170", "PH404178", "PH404186", "PH404187", "PH404188", "PH404194", "PH404202", "PH404210", "PH404218", "PH404226", "PH404227", "PH404228", 
"PH404234", "PH404242", "PH404250", "PH404258", "PH404266", "PH404274", "PH404282", "PH404290", "PH404298", "PH404306", "PH404314", "PH404322", "PH404330", "PH404338", "PH404346", "PH404354", "PH404362", "PH404370", "PH404378", "PH404386", "PH404394", "PH404402", "PH404410", "PH404418", "PH404426", "PH404434", "PH404442", "PH404450", "PH404458", "PH404466", "PH404474", "PH404482", "PH404490", "PH404498", "PH404506", "PH404514", "PH404522", "PH404530", "PH404538", "PH404546", "PH404554", "PH404562", "PH404570", "PH404578", "PH404586", "PH404594", "PH404602", "PH404610", "PH404618", "PH404626", "PH404634", "PH404642", "PH404650", "PH404658", "PH404666", "PH404674", "PH404682", "PH404690", "PH404698", "PH404706", "PH404714", "PH404722", "PH404730", "PH404738", "PH404746", "PH404754", "PH404762", "PH404770", "PH404778", "PH404786", "PH404802", "PH404810", "PH404818", "PH404826", "PH404834", "PH404842", "PH404850", "PH404858", "PH404866", "PH404874", "PH404882", "PH404890", "PH404898", "PH404906", "PH404914", "PH404922", "PH404930", "PH404938", "PH404946", "PH404954", "PH404962", "PH404970", "PH404978", "PH404986", "PH404994", "PH405002", "PH405010", "PH405018", "PH405026", "PH405034", "PH405042", "PH405050", "PH405051", "PH405058", "PH405059", "PH405066", "PH405082", "PH405090", "PH405098", "PH405106", "PH405114", "PH405122", "PH405123", "PH405130", "PH405138", "PH405146", "PH405154", "PH405170", "PH405179", "PH405186", "PH405194", "PH405202", "PH405210", "PH405218", "PH405226", "PH405234", "PH405235", "PH405242", "PH405243", "PH405250", "PH405258", "PH405266", "PH405274", "PH405282", "PH405290", "PH405298", "PH405306", "PH405314", "PH405322", "PH405323", "PH405324", "PH405330", "PH405338", "PH405354", "PH405370", "PH405378", "PH405386", "PH405394", "PH405402", "PH405410", "PH405418", "PH405426", "PH405434", "PH405442", "PH405450", "PH405458", "PH405466", "PH405474", "PH408930", "PH409601", "PH409618", "PI011642", "PI011650", "PI011658", "PI011666", "PI040006", 
"PI040578", "PI040594", "PI040597", "PI040599", "PI040600", "PI040601", "PI040610", "PI041073", "PI041074", "PI041075", "PI041076", "PI041077", "PI041078", "PI041079", "PI041080", "PI041082", "PI041090", "PI041106", "PI041107", "PL018834", "PM008826", "PM020400", "PM020404", "PM020700", "PM020704", "PM030000", "PM030100", "PM030108", "PM030110", "PM030811", "PM030812", "PM039393", "PM039394", "PM039395", "PM039396", "PM039397", "PM062306", "PN050814", "PN052314", "PN058970", "PN060815", "PN070816", "PO100000", "PP030000", "PP030001", "PP030100", "PP030106", "PP030107", "PP030108", "PP030109", "PP030110", "PP030113", "PP030771", "PP050736", "PP050737", "PP051195", "PP051196", "PP140029", "PP140037", "PP140235", "PP140236", "PP148090", "PP148211", "PP198874", "PP208778", "PR058882", "PR060298", "PR061200", "PR062482", "PR062498", "PR082546", "PR082626", "PR082658", "PR082666", "PR097018", "PS050000", "PS050100", "PS050101", "PS050102", "PS050103", "PS050104", "PS050105", "PS050106", "PS050107", "PS050108", "PS050109", "PS050110", "PS050111", "PS050112", "PS050113", "PS050114", "PS050115", "PS050116", "PS050117", "PS050118", "PS050119", "PS050120", "PS050121", "PS050122", "PS050123", "PS050124", "PS050125", "PS050126", "PS050127", "PS050300", "PS050302", "PS050303", "PS050401", "PS050402", "PS050403", "PS050404", "PS050405", "PS060000", "PS060100", "PS060101", "PS060102", "PS060103", "PS060104", "PS060105", "PS060106", "PS060107", "PS060108", "PS060109", "PS060110", "PS060111", "PS060112", "PS060113", "PS060114", "PS060115", "PS060116", "PS060117", "PS060118", "PS060119", "PS060120", "PS060121", "PS060122", "PS060123", "PS060124", "PS060125", "PS060126", "PS060127", "PS060300", "PS060302", "PS060303", "PS060401", "PS060404", "PS060405", "PS078938", "PS080101", "PS081250", "PS081258", "PS081266", "PS081274", "PS081282", "PS101298", "PS116242", "PS118898", "PS160747", "PS160748", "PS160749", "PS160750", "PS160751", "PS160800", "PS160801", "PS160802", "PT030000", 
"PT030400", "PT030500", "PT030501", "PT030600", "PT118906", "PW032474", "S0000000", "S0000001", "S0557082", "S9999999", "SA010000", "SA010100", "SA010200", "SA010300", "SA010400", "SA011060", "SA016850", "SA030000", "SA030200", "SA030300", "SA030400", "SA030401", "SA035898", "SA035906", "SA035914", "SA035922", "SA035930", "SA035938", "SA035946", "SA035954", "SA036298", "SA046706", "SA046866", "SA070000", "SA080000", "SA090000", "SA090101", "SA090102", "SA090103", "SA090400", "SA090500", "SA090700", "SA097130", "SA186314", "SB010000", "SB010100", "SB010200", "SB010300", "SB016682", "SB030000", "SB040100", "SB045978", "SB045994", "SB046002", "SB046003", "SB046004", "SB046010", "SB070000", "SB070400", "SB070500", "SB070700", "SB071000", "SB156266", "SC010000", "SC060000", "SC070000", "SC070100", "SC070101", "SC070102", "SC070103", "SC070104", "SC070121", "SC070131", "SC070200", "SC070201", "SC070202", "SC070203", "SC070204", "SC070300", "SC070301", "SC070302", "SC070303", "SC070304", "SC070305", "SC070306", "SC070321", "SC070331", "SC080000", "SC080100", "SC080101", "SC080102", "SC080103", "SC080121", "SC080200", "SC080201", "SC080202", "SC080203", "SC080300", "SC080301", "SC080302", "SC080303", "SC080304", "SC080305", "SC080306", "SC080307", "SC080308", "SC080309", "SC080321", "SC090000", "SC090100", "SC090101", "SC090102", "SC090103", "SC090200", "SC090201", "SC090202", "SC090300", "SC090301", "SC090302", "SC090303", "SC090400", "SC090401", "SC090402", "SC090500", "SC090501", "SC090502", "SC090503", "SC090600", "SC090601", "SC090602", "SC100000", "SC100100", "SC100101", "SC100102", "SC100103", "SC100121", "SC100122", "SC100200", "SC100201", "SC100202", "SC100203", "SC100221", "SC100222", "SC100300", "SC100301", "SC100302", "SC100303", "SC100400", "SC100401", "SC100402", "SC100500", "SC100501", "SC100502", "SC100503", "SC100600", "SC100601", "SC100602", "SC100603", "SC117274", "SC117314", "SC120000", "SC120100", "SC120101", "SC120102", "SC120200", "SC120201", 
"SC120202", "SC120300", "SC120301", "SC120302", "SC120400", "SC120401", "SC120402", "SC120500", "SC120501", "SC120502", "SC120600", "SC120601", "SC120602", "SC130000", "SC130100", "SC130101", "SC130102", "SC130121", "SC130122", "SC130200", "SC130201", "SC130202", "SC130221", "SC130222", "SC130300", "SC130301", "SC130302", "SC130400", "SC130401", "SC130402", "SC130500", "SC130501", "SC130502", "SC130600", "SC130601", "SC130602", "SC150100", "SC150156", "SC150157", "SC150158", "SC150200", "SC150205", "SC150300", "SC150305", "SC150306", "SC155866", "SC155874", "SC155882", "SC155890", "SC156194", "SC156210", "SC160000", "SC160500", "SC160900", "SC161000", "SC161100", "SC166938", "SC210000", "SC223100", "SD016410", "SD020100", "SD020900", "SD020901", "SD020902", "SD021100", "SD036434", "SD040100", "SD050000", "SD086570", "SD110100", "SD140000", "SE020000", "SE040200", "SE040301", "SE050000", "SE050100", "SE050200", "SE050300", "SE050400", "SE066490", "SE076498", "SE090000", "SE100100", "SE109350", "SF010000", "SF072442", "SF072450", "SF086522", "SF086530", "SF096362", "SF096370", "SF096386", "SF106514", "SG016250", "SG016962", "SG017226", "SG030000", "SG030100", "SG030200", "SG046826", "SH020400", "SH026466", "SH026626", "SI010001", "SI010002", "SI010003", "SI010004", "SI016642", "SI020100", "SI020200", "SI026666", "SI046674", "SI060000", "SI080000", "SI086690", "SJ010000", "SJ010100", "SJ010300", "SL006714", "SL020159", "SL026730", "SL027170", "SL066754", "SM016778", "SM020200", "SM020400", "SM050200", "SM060000", "SM060400", "SM060500", "SM060700", "SM076818", "SM146794", "SM146802", "SP010000", "SP036858", "SP040000", "SP060000", "SP076882", "SP076890", "SP102800", "SP106394", "SP106914", "SP140000", "SP156226", "SP186834", "SR060000", "SR060100", "SR060101", "SR060200", "SR060201", "SR066978", "SR066986", "SR067002", "SR087010", "SS020000", "SS040000", "SS040200", "SS040300", "SS040400", "SS040500", "SS046322", "SS046330", "SS046634", "SS050200", "SS050400", 
"SS066906", "SS087058", "SS087066", "SS127090", "SS130000", "SS186898", "SS197074", "SS227026", "ST017098", "ST027106", "ST037114", "ST040100", "ST050100", "ST080000", "ST087634", "ST096306", "ST096738", "ST097154", "ST112506", "ST117162", "ST147146", "ST157138", "SW019478", "SW037234", "SW040000", "SW040400", "SW040500", "SW040700", "SW050000", "SW050300", "SW050400", "SW050401", "SW050500", "SW052000", "SW052010", "SW052020", "SW052201", "SW052202", "SW052301", "SW052302", "SW052400", "SW053000", "SW053001", "SW053002", "SW054000", "SW054001", "SW054002", "SW054100", "SW054101", "SW054102", "SW057242", "SW077250", "SW077260", "SW090000", "SW110000", "SW117202", "SW117210", "SW117218", "SY010100", "SY010200"]
# Take the first 50 tax codes as the sample batch to import into send_sales.
sample_codes = tax_codes[:50]
| 4,123.5
| 24,624
| 0.66695
| 2,074
| 24,741
| 7.954195
| 0.996143
| 0.00097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.543341
| 0.083748
| 24,741
| 5
| 24,625
| 4,948.2
| 0.184393
| 0.003072
| 0
| 0
| 0
| 0
| 0.665396
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
09f96beb17de21326a86a1bc1e0deade8b798629
| 30,455
|
py
|
Python
|
remodet_repository_wdh_part/Projects/PyLib/NetLib/PvaNet.py
|
UrwLee/Remo_experience
|
a59d5b9d6d009524672e415c77d056bc9dd88c72
|
[
"MIT"
] | null | null | null |
remodet_repository_wdh_part/Projects/PyLib/NetLib/PvaNet.py
|
UrwLee/Remo_experience
|
a59d5b9d6d009524672e415c77d056bc9dd88c72
|
[
"MIT"
] | null | null | null |
remodet_repository_wdh_part/Projects/PyLib/NetLib/PvaNet.py
|
UrwLee/Remo_experience
|
a59d5b9d6d009524672e415c77d056bc9dd88c72
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.proto import caffe_pb2
sys.dont_write_bytecode = True
def smCReLULayer(net, from_layer, out_layer, channels=32, use_reduced_layer=False,
                 reduced_layers=None, lr=1, decay=1):
    """Append a small CReLU conv block to ``net`` and return it.

    CReLU (Concatenated ReLU) concatenates a blob with its negation before
    the ReLU, doubling the effective channel count of the 3x3 conv.  With
    ``use_reduced_layer`` the block becomes a 1x1 -> 3x3(CReLU) -> 1x1
    bottleneck whose widths are taken from ``reduced_layers``.

    Args:
        net: caffe NetSpec being built (mutated in place).
        from_layer: name of the input layer in ``net``.
        out_layer: name prefix for every layer created here.
        channels: 3x3 conv width when the bottleneck form is not used.
        use_reduced_layer: build the 1x1/3x3/1x1 bottleneck variant.
        reduced_layers: three channel counts [reduce, inter, out]; required
            when ``use_reduced_layer`` is True.
        lr: learning-rate multiplier for trainable parameters.
        decay: weight-decay multiplier for conv weights.

    Returns:
        The same ``net`` object, for chaining.
    """
    # Fix: mutable default argument ([]) replaced by a None sentinel.
    if reduced_layers is None:
        reduced_layers = []
    # Frozen BatchNorm: uses stored statistics, no trainable parameters.
    bn_kwargs = {
        'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
        'batch_norm_param': dict(use_global_stats=True),
    }
    # Learnable per-channel scale + bias that follows each BatchNorm.
    scale_kwargs = {
        'bias_term': True,
        'param': [dict(lr_mult=lr, decay_mult=0), dict(lr_mult=lr, decay_mult=0)],
    }
    # Power layer computing y = -x, used to build the CReLU concat [x, -x].
    power_kwargs = {'power': 1, 'scale': -1.0, 'shift': 0}
    # Bias-free convolution; the trailing Scale layer supplies the bias.
    conv_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay)],
        'weight_filler': dict(type='xavier'),
        'bias_term': False,
    }
    start_layer = from_layer
    # Optional 1x1 reduction: conv -> BN -> scale -> ReLU.
    if use_reduced_layer:
        name = "{}/reduced/conv".format(out_layer)
        net[name] = L.Convolution(net[start_layer], num_output=reduced_layers[0],
                                  kernel_size=1, pad=0, stride=1, **conv_kwargs)
        start_layer = name
        name = "{}/reduced/bn".format(out_layer)
        net[name] = L.BatchNorm(net[start_layer], in_place=True, **bn_kwargs)
        start_layer = name
        name = "{}/reduced/scale".format(out_layer)
        net[name] = L.Scale(net[start_layer], in_place=True, **scale_kwargs)
        start_layer = name
        name = "{}/reduced/relu".format(out_layer)
        net[name] = L.ReLU(net[start_layer], in_place=True)
        start_layer = name
    # 3x3 conv + CReLU core (BN -> concat[x, -x] -> scale -> ReLU).  The two
    # original branches were identical except for the name prefix and width,
    # so select those up front instead of duplicating the whole sequence.
    if use_reduced_layer:
        prefix = "{}/inter".format(out_layer)
        width = reduced_layers[1]
    else:
        prefix = out_layer
        width = channels
    name = "{}/conv".format(prefix)
    net[name] = L.Convolution(net[start_layer], num_output=width,
                              kernel_size=3, pad=1, stride=1, **conv_kwargs)
    start_layer = name
    name = "{}/bn".format(prefix)
    # Not in-place: the BN output feeds both the identity and negated branch.
    net[name] = L.BatchNorm(net[start_layer], in_place=False, **bn_kwargs)
    start_layer = name
    neg_name = "{}/neg".format(prefix)
    net[neg_name] = L.Power(net[start_layer], **power_kwargs)
    name = "{}/concat".format(prefix)
    net[name] = L.Concat(net[start_layer], net[neg_name], axis=1)
    start_layer = name
    name = "{}/scale".format(prefix)
    net[name] = L.Scale(net[start_layer], in_place=True, **scale_kwargs)
    start_layer = name
    name = "{}/relu".format(prefix)
    net[name] = L.ReLU(net[start_layer], in_place=True)
    start_layer = name
    # Optional 1x1 expansion back to the block's output width.
    if use_reduced_layer:
        name = "{}/out/conv".format(out_layer)
        net[name] = L.Convolution(net[start_layer], num_output=reduced_layers[2],
                                  kernel_size=1, pad=0, stride=1, **conv_kwargs)
        start_layer = name
        name = "{}/out/bn".format(out_layer)
        net[name] = L.BatchNorm(net[start_layer], in_place=True, **bn_kwargs)
        start_layer = name
        name = "{}/out/scale".format(out_layer)
        net[name] = L.Scale(net[start_layer], in_place=True, **scale_kwargs)
        start_layer = name
        name = "{}/out/relu".format(out_layer)
        net[name] = L.ReLU(net[start_layer], in_place=True)
        start_layer = name
    return net
def smCReLULayer_NBN(net, from_layer, out_layer, channels=32, use_reduced_layer=False,
                     reduced_layers=None, lr=1, decay=1):
    """Append a CReLU conv block whose 1x1 convs skip BatchNorm ("NBN").

    Same topology as ``smCReLULayer``, but the optional 1x1 reduce/expand
    convs carry their own bias and are followed only by a ReLU (no BN/Scale);
    only the 3x3 CReLU core keeps the bias-free conv + frozen BN + Scale.

    Args:
        net: caffe NetSpec being built (mutated in place).
        from_layer: name of the input layer in ``net``.
        out_layer: name prefix for every layer created here.
        channels: 3x3 conv width when the bottleneck form is not used.
        use_reduced_layer: build the 1x1/3x3/1x1 bottleneck variant.
        reduced_layers: three channel counts [reduce, inter, out]; required
            when ``use_reduced_layer`` is True.
        lr: learning-rate multiplier for trainable parameters.
        decay: weight-decay multiplier for conv weights.

    Returns:
        The same ``net`` object, for chaining.
    """
    # Fix: mutable default argument ([]) replaced by a None sentinel.
    if reduced_layers is None:
        reduced_layers = []
    # Frozen BatchNorm: uses stored statistics, no trainable parameters.
    bn_kwargs = {
        'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
        'batch_norm_param': dict(use_global_stats=True),
    }
    # Learnable per-channel scale + bias after the CReLU concat.
    scale_kwargs = {
        'bias_term': True,
        'param': [dict(lr_mult=lr, decay_mult=0), dict(lr_mult=lr, decay_mult=0)],
    }
    # Power layer computing y = -x for the CReLU concat [x, -x].
    power_kwargs = {'power': 1, 'scale': -1.0, 'shift': 0}
    # Biased conv for the 1x1 layers — no BatchNorm follows them here.
    conv_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay), dict(lr_mult=2*lr, decay_mult=0)],
        'weight_filler': dict(type='xavier'),
        'bias_filler': dict(type='constant', value=0)
    }
    # Bias-free conv for the 3x3 layer; its BN/Scale pair supplies the bias.
    conv_nb_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay)],
        'weight_filler': dict(type='xavier'),
        'bias_term': False,
    }
    start_layer = from_layer
    # Optional 1x1 reduction: biased conv -> ReLU (no BN).
    if use_reduced_layer:
        name = "{}/reduced/conv".format(out_layer)
        net[name] = L.Convolution(net[start_layer], num_output=reduced_layers[0],
                                  kernel_size=1, pad=0, stride=1, **conv_kwargs)
        start_layer = name
        name = "{}/reduced/relu".format(out_layer)
        net[name] = L.ReLU(net[start_layer], in_place=True)
        start_layer = name
    # 3x3 conv + CReLU core.  Both original branches were identical except
    # for the name prefix and width, so pick those and emit the core once.
    if use_reduced_layer:
        prefix = "{}/inter".format(out_layer)
        width = reduced_layers[1]
    else:
        prefix = out_layer
        width = channels
    name = "{}/conv".format(prefix)
    net[name] = L.Convolution(net[start_layer], num_output=width,
                              kernel_size=3, pad=1, stride=1, **conv_nb_kwargs)
    start_layer = name
    name = "{}/bn".format(prefix)
    # Not in-place: the BN output feeds both the identity and negated branch.
    net[name] = L.BatchNorm(net[start_layer], in_place=False, **bn_kwargs)
    start_layer = name
    neg_name = "{}/neg".format(prefix)
    net[neg_name] = L.Power(net[start_layer], **power_kwargs)
    name = "{}/concat".format(prefix)
    net[name] = L.Concat(net[start_layer], net[neg_name], axis=1)
    start_layer = name
    name = "{}/scale".format(prefix)
    net[name] = L.Scale(net[start_layer], in_place=True, **scale_kwargs)
    start_layer = name
    name = "{}/relu".format(prefix)
    net[name] = L.ReLU(net[start_layer], in_place=True)
    start_layer = name
    # Optional 1x1 expansion: biased conv -> ReLU (no BN).
    if use_reduced_layer:
        name = "{}/out/conv".format(out_layer)
        net[name] = L.Convolution(net[start_layer], num_output=reduced_layers[2],
                                  kernel_size=1, pad=0, stride=1, **conv_kwargs)
        start_layer = name
        name = "{}/out/relu".format(out_layer)
        net[name] = L.ReLU(net[start_layer], in_place=True)
        start_layer = name
    return net
def mCReLULayer(net, from_layer, out_layer, reduced_channels=24, \
                inter_channels=24, output_channels=48, lr=1, decay=1, \
                use_prior_bn=True, cross_stage=False, has_pool=False):
    """Append a residual mCReLU bottleneck block to ``net`` and return it.

    Pre-activation structure: [BN -> scale -> ReLU ->] 1x1 conv ->
    BN -> scale -> ReLU -> 3x3 conv -> BN -> CReLU(concat[x,-x] -> scale ->
    ReLU) -> 1x1 conv, summed elementwise with a shortcut — either a 1x1
    conv projection of the input (``cross_stage=True``) or the input passed
    through an identity Power layer.  The sum is registered under the name
    ``out_layer`` itself.

    Args:
        net: caffe NetSpec being built (mutated in place).
        from_layer: name of the input layer in ``net``.
        out_layer: name prefix for created layers; also the eltwise output name.
        reduced_channels: width of the first 1x1 conv.
        inter_channels: width of the middle 3x3 conv.
        output_channels: width of the final 1x1 conv (block output width).
        lr: learning-rate multiplier for trainable parameters.
        decay: weight-decay multiplier for conv weights.
        use_prior_bn: prepend a BN/scale/ReLU pre-activation before conv/1.
        cross_stage: use a 1x1 conv projection on the shortcut instead of
            the identity (for stages where width or stride changes).
        has_pool: downsample with stride 2 in conv/1 (and in the projection).

    Returns:
        The same ``net`` object, for chaining.
    """
    # Frozen BatchNorm: uses stored statistics, no trainable parameters.
    bn_kwargs = {
        'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
        'batch_norm_param': dict(use_global_stats=True),
    }
    # Learnable per-channel scale + bias that follows each BatchNorm.
    scale_kwargs = {
        'bias_term': True,
        'param': [dict(lr_mult=lr, decay_mult=0), dict(lr_mult=lr, decay_mult=0)],
    }
    # Power computing y = -x (CReLU negated branch).
    power_kwargs = {'power': 1, 'scale': -1.0, 'shift': 0}
    # Identity Power (y = x): gives the shortcut its own named layer.
    input_kwargs = {'power': 1, 'scale': 1, 'shift': 0}
    # Biased convolution; bias learns at 2x lr with no weight decay.
    conv_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay), dict(lr_mult=2*lr, decay_mult=0)],
        'weight_filler': dict(type='xavier'),
        'bias_filler': dict(type='constant', value=0)
    }
    # Elementwise SUM with unit coefficients (operation 1 = SUM).
    eltwise_kwargs = {'operation': 1, 'coeff': [1, 1]}
    # conv/1: bn/scale/relu/conv
    start_layer = from_layer
    if use_prior_bn:
        layer_name = "{}/1/bn".format(out_layer)
        # Blob "/1/pre" is kept distinct (in_place=False) so the projection
        # shortcut can tap it when cross_stage and has_pool are both set.
        name = "{}/1/pre".format(out_layer)
        net[name] = L.BatchNorm(net[start_layer], name=layer_name, in_place=False, **bn_kwargs)
        start_layer = name
        layer_name = "{}/1/bn_scale".format(out_layer)
        name = "{}/1/bn_scale".format(out_layer)
        net[name] = L.Scale(net[start_layer], name=layer_name, in_place=True, **scale_kwargs)
        start_layer = name
        layer_name = "{}/1/relu".format(out_layer)
        name = "{}/1/relu".format(out_layer)
        net[name] = L.ReLU(net[start_layer], name=layer_name, in_place=True)
        start_layer = name
    layer_name = "{}/1/conv".format(out_layer)
    name = "{}/1".format(out_layer)
    # Downsampling, when requested, happens in this first 1x1 conv.
    if has_pool:
        stride = 2
    else:
        stride = 1
    net[name] = L.Convolution(net[start_layer], name=layer_name, num_output=reduced_channels, \
                              kernel_size=1, pad=0, stride=stride, **conv_kwargs)
    start_layer = name
    # conv/2: bn/scale/relu/conv
    layer_name = "{}/2/bn".format(out_layer)
    name = "{}/2/pre".format(out_layer)
    net[name] = L.BatchNorm(net[start_layer], name=layer_name, in_place=False, **bn_kwargs)
    start_layer = name
    layer_name = "{}/2/bn_scale".format(out_layer)
    name = "{}/2/bn_scale".format(out_layer)
    net[name] = L.Scale(net[start_layer], name=layer_name, in_place=True, **scale_kwargs)
    start_layer = name
    layer_name = "{}/2/relu".format(out_layer)
    name = "{}/2/relu".format(out_layer)
    net[name] = L.ReLU(net[start_layer], name=layer_name, in_place=True)
    start_layer = name
    layer_name = "{}/2/conv".format(out_layer)
    name = "{}/2".format(out_layer)
    net[name] = L.Convolution(net[start_layer], name=layer_name, num_output=inter_channels, \
                              kernel_size=3, pad=1, stride=1, **conv_kwargs)
    start_layer = name
    # conv/3: bn/neg/concat/scale/relu/conv
    feaLayers = []
    bn_layer = "{}/3/bn".format(out_layer)
    bn_name = "{}/3/pre".format(out_layer)
    # Not in-place: BN output feeds both the identity and the negated branch.
    net[bn_name] = L.BatchNorm(net[start_layer], name=bn_layer, in_place=False, **bn_kwargs)
    feaLayers.append(net[bn_name])
    start_layer = bn_name
    neg_layer = "{}/3/neg".format(out_layer)
    neg_name = "{}/3/neg".format(out_layer)
    net[neg_name] = L.Power(net[start_layer], name=neg_layer, **power_kwargs)
    feaLayers.append(net[neg_name])
    concat_layer = "{}/3/concat".format(out_layer)
    concat_name = "{}/3/preAct".format(out_layer)
    # CReLU: channel-wise concat of [x, -x] doubles the channel count.
    net[concat_name] = L.Concat(*feaLayers, name=concat_layer, axis=1)
    layer_name = "{}/3/scale".format(out_layer)
    name = "{}/3/scale".format(out_layer)
    net[name] = L.Scale(net[concat_name], name=layer_name, in_place=True, **scale_kwargs)
    start_layer = name
    layer_name = "{}/3/relu".format(out_layer)
    name = "{}/3/relu".format(out_layer)
    net[name] = L.ReLU(net[start_layer], name=layer_name, in_place=True)
    start_layer = name
    layer_name = "{}/3/conv".format(out_layer)
    name = "{}/3".format(out_layer)
    net[name] = L.Convolution(net[start_layer], name=layer_name, num_output=output_channels, \
                              kernel_size=1, pad=0, stride=1, **conv_kwargs)
    start_layer = name
    mlayers = []
    mlayers.append(net[name])
    # proj or input
    if cross_stage:
        # Projection shortcut: 1x1 conv matches width (and stride) of the main path.
        layer_name = "{}/proj".format(out_layer)
        name = "{}/proj".format(out_layer)
        if has_pool:
            # Tap the pre-BN blob so the shortcut shares the normalized input.
            # NOTE(review): "/1/pre" only exists when use_prior_bn is True —
            # has_pool + cross_stage without prior BN would fail; confirm callers.
            start_layer = "{}/1/pre".format(out_layer)
            stride = 2
        else:
            start_layer = from_layer
            stride = 1
        net[name] = L.Convolution(net[start_layer], name=layer_name, num_output=output_channels, \
                                  kernel_size=1, pad=0, stride=stride, **conv_kwargs)
        mlayers.append(net[name])
    else:
        # Identity shortcut, materialized as a named Power(y = x) layer.
        layer_name = "{}/input".format(out_layer)
        name = "{}/input".format(out_layer)
        start_layer = from_layer
        net[name] = L.Power(net[start_layer], name=layer_name, **input_kwargs)
        mlayers.append(net[name])
    # eltwise
    # Residual sum of the main path and the shortcut, named ``out_layer``.
    layer_name = out_layer
    name = out_layer
    net[name] = L.Eltwise(*mlayers, name=layer_name, **eltwise_kwargs)
    return net
def _incep_unit(net, bottom, base, num_output, kernel_size, pad, stride, \
        conv_kwargs, bn_kwargs, scale_kwargs):
    """Append one conv->bn->scale->relu unit rooted at *base*.

    The conv top is named "<base>" (layer "<base>/conv"); bn, scale and relu
    run in place on it under "<base>/bn", "<base>/bn_scale", "<base>/relu".
    Returns the name of the final (relu) top so units can be chained.
    """
    net[base] = L.Convolution(net[bottom], name="{}/conv".format(base), \
        num_output=num_output, kernel_size=kernel_size, pad=pad, stride=stride, \
        **conv_kwargs)
    name = "{}/bn".format(base)
    net[name] = L.BatchNorm(net[base], name=name, in_place=True, **bn_kwargs)
    prev = name
    name = "{}/bn_scale".format(base)
    net[name] = L.Scale(net[prev], name=name, in_place=True, **scale_kwargs)
    prev = name
    name = "{}/relu".format(base)
    net[name] = L.ReLU(net[prev], name=name, in_place=True)
    return name

def ResInceptionLayer(net, from_layer, out_layer, cross_stage=False, channels_1=64, \
    channels_3=[48,128], channels_5=[24,48,128],channels_pool=128, \
    channels_output=256, lr=1, decay=1, out_bn=False):
    """Append a PVANet-style residual Inception block named *out_layer*.

    A shared pre-activation (bn -> scale -> relu of *from_layer*) feeds four
    branches: 1x1 (channels_1), 1x1->3x3 (channels_3), 1x1->3x3->3x3
    (channels_5), and -- only when cross_stage -- max-pool->1x1
    (channels_pool).  Branch outputs are concatenated, projected to
    channels_output with a 1x1 conv (conv/bn/scale when out_bn, a biased conv
    otherwise), and summed elementwise with a shortcut: a stride-2 1x1 conv of
    the input for cross-stage blocks, else an identity Power pass-through.
    cross_stage also sets stride 2 on the branch entry convs.  Returns *net*.

    NOTE: channels_3/channels_5 defaults are mutable lists; they are only
    read here, never mutated, so sharing across calls is harmless.
    """
    assert len(channels_3) == 2
    assert len(channels_5) == 3
    # BatchNorm statistics are frozen (global stats, zero lr).
    bn_kwargs = {
        'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
        'batch_norm_param': dict(use_global_stats=True),
    }
    scale_kwargs = {
        'bias_term': True,
        'param': [dict(lr_mult=lr, decay_mult=0), dict(lr_mult=lr, decay_mult=0)],
    }
    input_kwargs = {'power': 1, 'scale': 1, 'shift': 0}
    conv_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay)],
        'weight_filler': dict(type='xavier'),
        'bias_term': False,
    }
    convbias_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay), dict(lr_mult=2*lr, decay_mult=0)],
        'weight_filler': dict(type='xavier'),
        'bias_filler': dict(type='constant', value=0)
    }
    # operation 1 == SUM with unit coefficients (residual add).
    eltwise_kwargs = {'operation': 1, 'coeff': [1, 1]}
    stride = 2 if cross_stage else 1
    # pre-activation shared by all branches: bn (new top) -> scale -> relu
    name = "{}/incep/pre".format(out_layer)
    net[name] = L.BatchNorm(net[from_layer], name="{}/incep/bn".format(out_layer), \
        in_place=False, **bn_kwargs)
    prev = name
    name = "{}/incep/bn_scale".format(out_layer)
    net[name] = L.Scale(net[prev], name=name, in_place=True, **scale_kwargs)
    prev = name
    name = "{}/incep/relu".format(out_layer)
    net[name] = L.ReLU(net[prev], name=name, in_place=True)
    fea_layer = name
    mlayers = []
    # branch 0: 1x1 conv
    name = _incep_unit(net, fea_layer, "{}/incep/0".format(out_layer), channels_1, \
        1, 0, stride, conv_kwargs, bn_kwargs, scale_kwargs)
    mlayers.append(net[name])
    # branch 1: 1x1 reduce -> 3x3
    name = _incep_unit(net, fea_layer, "{}/incep/1_reduce".format(out_layer), \
        channels_3[0], 1, 0, stride, conv_kwargs, bn_kwargs, scale_kwargs)
    name = _incep_unit(net, name, "{}/incep/1_0".format(out_layer), channels_3[1], \
        3, 1, 1, conv_kwargs, bn_kwargs, scale_kwargs)
    mlayers.append(net[name])
    # branch 2: 1x1 reduce -> two stacked 3x3 convs (5x5 receptive field)
    name = _incep_unit(net, fea_layer, "{}/incep/2_reduce".format(out_layer), \
        channels_5[0], 1, 0, stride, conv_kwargs, bn_kwargs, scale_kwargs)
    name = _incep_unit(net, name, "{}/incep/2_0".format(out_layer), channels_5[1], \
        3, 1, 1, conv_kwargs, bn_kwargs, scale_kwargs)
    name = _incep_unit(net, name, "{}/incep/2_1".format(out_layer), channels_5[2], \
        3, 1, 1, conv_kwargs, bn_kwargs, scale_kwargs)
    mlayers.append(net[name])
    # pooling branch: only on stride-2 (cross-stage) blocks
    if cross_stage:
        name = "{}/incep/pool".format(out_layer)
        net[name] = L.Pooling(net[fea_layer], pool=P.Pooling.MAX, kernel_size=3, stride=2)
        name = _incep_unit(net, name, "{}/incep/poolproj".format(out_layer), \
            channels_pool, 1, 0, 1, conv_kwargs, bn_kwargs, scale_kwargs)
        mlayers.append(net[name])
    # concatenate all branches along channels
    name = "{}/incep".format(out_layer)
    net[name] = L.Concat(*mlayers, name=name, axis=1)
    prev = name
    # 1x1 projection to channels_output
    scLayers = []
    out_name = "{}/out".format(out_layer)
    if not out_bn:
        net[out_name] = L.Convolution(net[prev], name="{}/out/conv".format(out_layer), \
            num_output=channels_output, kernel_size=1, pad=0, stride=1, **convbias_kwargs)
        scLayers.append(net[out_name])
    else:
        net[out_name] = L.Convolution(net[prev], name="{}/out/conv".format(out_layer), \
            num_output=channels_output, kernel_size=1, pad=0, stride=1, **conv_kwargs)
        name = "{}/out/bn".format(out_layer)
        net[name] = L.BatchNorm(net[out_name], name=name, in_place=True, **bn_kwargs)
        prev = name
        name = "{}/out/bn_scale".format(out_layer)
        net[name] = L.Scale(net[prev], name=name, in_place=True, **scale_kwargs)
        scLayers.append(net[name])
    # shortcut: strided 1x1 conv across stages, identity otherwise
    if cross_stage:
        name = "{}/proj".format(out_layer)
        net[name] = L.Convolution(net[from_layer], name=name, num_output=channels_output, \
            kernel_size=1, pad=0, stride=2, **convbias_kwargs)
        scLayers.append(net[name])
    else:
        name = "{}/input".format(out_layer)
        net[name] = L.Power(net[from_layer], name=name, **input_kwargs)
        scLayers.append(net[name])
    # residual sum
    net[out_layer] = L.Eltwise(*scLayers, name=out_layer, **eltwise_kwargs)
    return net
def pva_convHeader(net, from_layer, out_layer, use_pool=True, lr=1, decay=1):
    """Build the PVANet stem: 7x7/2 conv followed by a C.ReLU activation
    (bn -> [x, -x] concat -> scale -> relu) and, optionally, a 3x3/2 max
    pool named "pool1".  Returns *net* for chaining.
    """
    # BatchNorm runs with frozen global statistics (zero learning rate).
    bn_kwargs = {
        'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
        'batch_norm_param': dict(use_global_stats=True),
    }
    scale_kwargs = {
        'bias_term': True,
        'param': [dict(lr_mult=lr, decay_mult=0), dict(lr_mult=lr, decay_mult=0)],
    }
    # Power with scale -1 negates the bn output for the C.ReLU concat.
    power_kwargs = {'power': 1, 'scale': -1.0, 'shift': 0}
    conv_kwargs = {
        'param': [dict(lr_mult=lr, decay_mult=decay)],
        'weight_filler': dict(type='xavier'),
        'bias_term': False,
    }
    conv_top = "{}/conv".format(out_layer)
    net[conv_top] = L.Convolution(net[from_layer], name=conv_top, num_output=16, \
        kernel_size=7, pad=3, stride=2, **conv_kwargs)
    bn_top = "{}/bn".format(out_layer)
    net[bn_top] = L.BatchNorm(net[conv_top], name=bn_top, in_place=True, **bn_kwargs)
    neg_top = "{}/neg".format(out_layer)
    net[neg_top] = L.Power(net[bn_top], name=neg_top, **power_kwargs)
    # C.ReLU: stack the positive and negated responses along channels;
    # the concat top takes the block's own name.
    net[out_layer] = L.Concat(net[bn_top], net[neg_top], \
        name="{}/concat".format(out_layer), axis=1)
    scale_top = "{}/scale".format(out_layer)
    net[scale_top] = L.Scale(net[out_layer], name=scale_top, in_place=True, **scale_kwargs)
    relu_top = "{}/relu".format(out_layer)
    net[relu_top] = L.ReLU(net[scale_top], name=relu_top, in_place=True)
    if use_pool:
        net["pool1"] = L.Pooling(net[relu_top], pool=P.Pooling.MAX, kernel_size=3, stride=2)
    return net
def PvaNet(net, from_layer="data", lr=1, decay=1):
    """Assemble the PVANet feature-extraction backbone on *net*.

    Stages: C.ReLU stem (conv1_1 + pool1), mCReLU stages conv2_x / conv3_x,
    residual-Inception stages conv4_x / conv5_x, then a final frozen
    bn / scale / relu applied in place on top of conv5_4.  Returns *net*.
    """
    # input Layer
    pva_convHeader(net, from_layer, "conv1_1", use_pool=True, lr=lr, decay=decay)
    # conv2_1
    mCReLULayer(net, "pool1", "conv2_1", reduced_channels=24, \
        inter_channels=24, output_channels=64, lr=lr, decay=decay, \
        use_prior_bn=False, cross_stage=True, has_pool=False)
    # conv2_2
    mCReLULayer(net, "conv2_1", "conv2_2", reduced_channels=24, \
        inter_channels=24, output_channels=64, lr=lr, decay=decay, \
        use_prior_bn=True, cross_stage=False, has_pool=False)
    # conv2_3
    mCReLULayer(net, "conv2_2", "conv2_3", reduced_channels=24, \
        inter_channels=24, output_channels=64, lr=lr, decay=decay, \
        use_prior_bn=True, cross_stage=False, has_pool=False)
    # conv3_1
    mCReLULayer(net, "conv2_3", "conv3_1", reduced_channels=48, \
        inter_channels=48, output_channels=128, lr=lr, decay=decay, \
        use_prior_bn=True, cross_stage=True, has_pool=True)
    # conv3_2
    mCReLULayer(net, "conv3_1", "conv3_2", reduced_channels=48, \
        inter_channels=48, output_channels=128, lr=lr, decay=decay, \
        use_prior_bn=True, cross_stage=False, has_pool=False)
    # conv3_3
    mCReLULayer(net, "conv3_2", "conv3_3", reduced_channels=48, \
        inter_channels=48, output_channels=128, lr=lr, decay=decay, \
        use_prior_bn=True, cross_stage=False, has_pool=False)
    # conv3_4
    mCReLULayer(net, "conv3_3", "conv3_4", reduced_channels=48, \
        inter_channels=48, output_channels=128, lr=lr, decay=decay, \
        use_prior_bn=True, cross_stage=False, has_pool=False)
    # conv4_1
    ResInceptionLayer(net, "conv3_4", "conv4_1", cross_stage=True, channels_1=64, \
        channels_3=[48,128], channels_5=[24,48,48],channels_pool=128, \
        channels_output=256, lr=lr, decay=decay)
    # conv4_2
    ResInceptionLayer(net, "conv4_1", "conv4_2", cross_stage=False, channels_1=64, \
        channels_3=[64,128], channels_5=[24,48,48], \
        channels_output=256, lr=lr, decay=decay)
    # conv4_3
    ResInceptionLayer(net, "conv4_2", "conv4_3", cross_stage=False, channels_1=64, \
        channels_3=[64,128], channels_5=[24,48,48], \
        channels_output=256, lr=lr, decay=decay)
    # conv4_4
    ResInceptionLayer(net, "conv4_3", "conv4_4", cross_stage=False, channels_1=64, \
        channels_3=[64,128], channels_5=[24,48,48], \
        channels_output=256, lr=lr, decay=decay)
    # conv5_1
    ResInceptionLayer(net, "conv4_4", "conv5_1", cross_stage=True, channels_1=64, \
        channels_3=[96,192], channels_5=[32,64,64],channels_pool=128, \
        channels_output=384, lr=lr, decay=decay)
    # conv5_2
    ResInceptionLayer(net, "conv5_1", "conv5_2", cross_stage=False, channels_1=64, \
        channels_3=[96,192], channels_5=[32,64,64], \
        channels_output=384, lr=lr, decay=decay)
    # conv5_3
    ResInceptionLayer(net, "conv5_2", "conv5_3", cross_stage=False, channels_1=64, \
        channels_3=[96,192], channels_5=[32,64,64], \
        channels_output=384, lr=lr, decay=decay)
    # conv5_4
    ResInceptionLayer(net, "conv5_3", "conv5_4", cross_stage=False, channels_1=64, \
        channels_3=[96,192], channels_5=[32,64,64], \
        channels_output=384, lr=lr, decay=decay, out_bn=True)
    # build last bn/scale/relu
    bn_kwargs = {
        'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)],
        'batch_norm_param': dict(use_global_stats=True),
    }
    scale_kwargs = {
        'bias_term': True,
        'param': [dict(lr_mult=lr, decay_mult=0), dict(lr_mult=lr, decay_mult=0)],
    }
    # Last top inserted above is "conv5_4".  Wrap keys() in list(): on
    # Python 3, .keys() returns a non-indexable view, so keys()[-1] raises
    # TypeError; list(...)[-1] behaves identically on Python 2.
    start_layer = list(net.keys())[-1]
    layer_name = "conv5_4/last_bn"
    name = layer_name
    net[name] = L.BatchNorm(net[start_layer], name=layer_name, in_place=True, **bn_kwargs)
    start_layer = name
    layer_name = "conv5_4/last_bn_scale"
    name = layer_name
    net[name] = L.Scale(net[start_layer], name=layer_name, in_place=True, **scale_kwargs)
    start_layer = name
    layer_name = "conv5_4/last_relu"
    name = layer_name
    net[name] = L.ReLU(net[start_layer], name=layer_name, in_place=True)
    return net
| 46.07413
| 111
| 0.629617
| 4,375
| 30,455
| 4.121143
| 0.031314
| 0.163727
| 0.122684
| 0.096839
| 0.917471
| 0.892845
| 0.860788
| 0.827953
| 0.802163
| 0.773378
| 0
| 0.027698
| 0.215203
| 30,455
| 660
| 112
| 46.143939
| 0.726664
| 0.01494
| 0
| 0.654424
| 0
| 0
| 0.097196
| 0.016722
| 0
| 0
| 0
| 0
| 0.003339
| 1
| 0.010017
| false
| 0
| 0.010017
| 0
| 0.03005
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61f3e40393e4b4ac252ffdd140e4f37b9c407899
| 44
|
py
|
Python
|
tests/test_base.py
|
djvaroli/samsung_oct
|
83924a36d18a56b6cdaadffaf47a9218c7084264
|
[
"MIT"
] | 2
|
2021-07-04T16:34:08.000Z
|
2021-07-07T23:55:18.000Z
|
tests/test_base.py
|
janhavi-giri/samsung_oct
|
83924a36d18a56b6cdaadffaf47a9218c7084264
|
[
"MIT"
] | null | null | null |
tests/test_base.py
|
janhavi-giri/samsung_oct
|
83924a36d18a56b6cdaadffaf47a9218c7084264
|
[
"MIT"
] | 3
|
2021-07-10T01:14:00.000Z
|
2021-09-03T04:22:28.000Z
|
def test_always_true():
    """Trivial smoke test: always passes; verifies the test harness runs."""
    assert 1 == 1
| 8.8
| 23
| 0.613636
| 7
| 44
| 3.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.272727
| 44
| 4
| 24
| 11
| 0.71875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61ffc61776967eaabe37ad03a4601f921af591c1
| 34,891
|
py
|
Python
|
tasks-deploy/zip-crypt/generate.py
|
chankruze/qctf-school-2018
|
1e732cf264ee0a94bc2fc1fd8cf3a20660d57605
|
[
"MIT"
] | null | null | null |
tasks-deploy/zip-crypt/generate.py
|
chankruze/qctf-school-2018
|
1e732cf264ee0a94bc2fc1fd8cf3a20660d57605
|
[
"MIT"
] | null | null | null |
tasks-deploy/zip-crypt/generate.py
|
chankruze/qctf-school-2018
|
1e732cf264ee0a94bc2fc1fd8cf3a20660d57605
|
[
"MIT"
] | null | null | null |
TITLE = 'Заброшенный архив'
STATEMENT = '''
Рассказывают, что старое полуразваленное здание возле пруда когда-то было не просто служебным помещением, а использовалось как самый настоящий архив материалов для служебного пользования.
Порядок приёма документов был полностью автоматизирован: вся входящая корреспонденция сперва сжималась, а затем надёжно шифровалась секретным ключом. Это позволяло сохранять в тайне все материалы, не занимая при этом много места.
Как только стало ясно, что продолжать деятельность АЭС не представляется возможным, все архивы были вывезены.
Всё что сохранилось — [система шифрования](https://zip-crypt.contest.qctf.ru/{token}/) и [часть её
исходного кода](/static/files/27a5v3gnz3/utils.py).
'''
tokens = ['HzQt8U0ytJ9MoDkJQSAG42BHBZFy1KzP4-PXlIYmQ0RNyTJ0Zr8SwNsPJvrrkev1', 'HzQt8U0ytJ9MoDkJQSAG42lGz-LgebFxeqrkeI-7sFWhkoWwPHss53EOu3o0BhrA', 'HzQt8U0ytJ9MoDkJQSAG47SnvW3mBiu9bjKcsRDa2Htf48TbgkAoQ1LcSrwQ-pVa', 'HzQt8U0ytJ9MoDkJQSAG44hHu-Lm609I0dO3VF34Y4IWRB0mRfyG21FtoZ5LDG4C', 'HzQt8U0ytJ9MoDkJQSAG46P_4_EyLjLe8iymBrHfOvK7jwEALDeEB2ykdmMEu4Q5', 'HzQt8U0ytJ9MoDkJQSAG443N_c4z-G41mQ6Sn6OaQfNsU9Bhy_Ge0iKvztuCuCIl', 'HzQt8U0ytJ9MoDkJQSAG4w0HzItpDBX62nZxtz1TewUPJylpzYMP63vRRJxxN2S8', 'HzQt8U0ytJ9MoDkJQSAG4xFY6xASSkXzP57DwePGCmvOqs1WxwPX0HcuAZeqYKPr', 'HzQt8U0ytJ9MoDkJQSAG41hlUz5-UltJoJYWophxvGXXDlFlIvcl8_P3W2VG7dFO', 'HzQt8U0ytJ9MoDkJQSAG4-z-WrI0SoS51jJ-7BJ66rBbciR1Wk0aRAoqImMDPhx6', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT2szDuFrCPGWCfHVO2ck9gP', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L72Uwn4pH8JnFCMRQGJ36rAu', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZDJLhN8n9rmHteLxoLPOmpW', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZPkIz3Pr4teTqNosx2a4xqT', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBkm0JP6P1F7Mg8dntaOzE9C', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpzPxwhTUY4dkO9yQJX-kytt', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBvofSS2o6snVFTlcjGehzU0', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3jSZ4ss5-bo_uu4cUXssgsT', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcbWfXRAVbpevdmp27rY-6Bl', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9Qdu49JQtBnT7FwwPi4mIGok2', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu3CEEGg9WuWpnuyMoxffXyD', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqZkP7XpVMib2XVanx50AtIX', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ1FWtBrdVZAUe1GbhFpfZ5H', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUh_vgiE9vJMhGEWKecIRn1S', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5HKk2ZCdXr7QRQxOKcau2lf', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcphRfAwTOUtNMweEjv2e4b1', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wb_CWC_f3G0YdezdUzmwQ72', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndDnVfFoQKSH3OHcvJ3dLJQg', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGMTyn7xjCZibOsgnK94Biou', 
'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ_5RYkD2-yqJH77aXBIxDE5', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhvzYzaxMYCKoAPGrfTYqHAh', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2Kl5NsekYnjQpxNAaQCnFqMi', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0atqtzwGv-6b9gwLKh5_f7hR', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh3YINywtke9PaixebluAd0_', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3NNPki2nI--fTvhY3-NhrKM', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbLhqgtHLVR9xM0_Kq0QMU5L', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw81R0nYMV1JJ3GpwYOuAyYR8', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuQaBqqNZGw2oxlEe6KsoifC', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhXuw-UfKwaNXdVs__jglZUu', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tJx1viLXlyQMprfZ2B17aoc', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfhrX4WEC9cEqoFGgx5wtBIn', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUDLQbzbGkDpDkiuH81IjXYr', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbsjP8g8ibPVEZnX6Hp1_6Ol', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykPRFi37ag8js4rkw1k-07NC', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU-zQgvsFGUqNygajMHNfRAu', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc9_VV71kTvFqP7eimnscTpt', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRypvWZvhh1Xx0rL4Ku1eVSLH', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-IvPvRBqylquvHyJAqIwz7r', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg78PqcYrzXor1hCSAtSCqKT', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdDfrw6dZU-u6vZAX7wqmuyt', 'HzQt8U0ytJ9MoDkJQSAG49hcZTMa303EKUThOA7b8WhTmErThWNzSRUMl2gKM47Z', 'HzQt8U0ytJ9MoDkJQSAG4zOOg-mwUiCqaGIR-tTfWJkFSML3_h5upjfyPBhxTq_8', 'HzQt8U0ytJ9MoDkJQSAG4wrsjkiqeCUndWIqRo5DUB24E7dB07ZQa2n5J2R33YKQ', 'HzQt8U0ytJ9MoDkJQSAG44q41pEXdA4mbvPNjhdTa4mhMtdDJuc46VIywxg3ucH6', 'HzQt8U0ytJ9MoDkJQSAG46okAw8ilpywyAuOS9kncC_HvGoSQAC93zO5EmVPU8Le', 'HzQt8U0ytJ9MoDkJQSAG41tZu90qIis7GkZdP97cvmIblVgDlss_ij8dUIuBtsab', 'HzQt8U0ytJ9MoDkJQSAG44zFhlse7mghWcxWE4bOjFk7n6om71iQc2DLNWszOqhW', 'HzQt8U0ytJ9MoDkJQSAG49zdjAoXHLFGkPXjgxDCGXfk6mf5VWNMafUt_W4tuJBx', 
'HzQt8U0ytJ9MoDkJQSAG4wlGUDnY8Xo9yLjKYGjrBXJU3qcNuXen9mvL-i7RQbmI', 'HzQt8U0ytJ9MoDkJQSAG478zgaRmkbrCU7Fyz9w94YNrJPXXiYyN7vX2fwqYk6tb', 'HzQt8U0ytJ9MoDkJQSAG4xI4r5bndU3EgY0mHHl0zYKLx0_0Jv11f2rCudLxPH9v', 'HzQt8U0ytJ9MoDkJQSAG4zE2oZuokkbcTx9C885AVVO5OUdSz92TvhAd8IBbG0LK', 'HzQt8U0ytJ9MoDkJQSAG44DP_vEhuAI5L6XXC99Y6-EbFwVrquJ-itBpACWxTugA', 'HzQt8U0ytJ9MoDkJQSAG410NVAiJ3GdsZGq2TYRVf9XITYG7TALbJN5JPvvHNKu-', 'HzQt8U0ytJ9MoDkJQSAG44vAPDi4azMQJWp6ys-o_6C2YdzoL5zrH1fLT7qyvD68', 'HzQt8U0ytJ9MoDkJQSAG42XEigsFtQGyRLTxAPJbbySQUHz6kEsl8mlLpqOiGFu4', 'HzQt8U0ytJ9MoDkJQSAG40vJoh_qd3u8tT1zAQ-yhdk5xW0KK6G5IZlyr6HVI3-Y', 'HzQt8U0ytJ9MoDkJQSAG491KMYK221Svec5MMq5Io4mWMs0AaMeoVnVfgnstQnBH', 'HzQt8U0ytJ9MoDkJQSAG44N4jvn33rBuKuggQkcJE8FrwM3rs8_HL6-Yzao0NT33', 'HzQt8U0ytJ9MoDkJQSAG41jEqGOCAaEUfOHoo6lv1G7U6_i7ckkppSJQO1mbziMw', 'HzQt8U0ytJ9MoDkJQSAG42h41CMQixbUJ4vmgno23STjZrOq0kpCxHhYbpIBz13G', 'HzQt8U0ytJ9MoDkJQSAG4zLaiRYtftiWxDgV_uGoemrYACJfDkgje00bQIrAjoJ5', 'HzQt8U0ytJ9MoDkJQSAG4_25AbQ0PHL1LSSr-PGSO2pZ-6WoMcpuU5S6-IWWqpXs', 'HzQt8U0ytJ9MoDkJQSAG45luMMeXSQcFnedleFHSa6Cwa7S-BkgwMfHE1Vg1VPdE', 'HzQt8U0ytJ9MoDkJQSAG49LxRT3guvF42jFnGJJNmrXw8zhNwv__0Ee5GD0Aq6R4', 'HzQt8U0ytJ9MoDkJQSAG44GdNM_kNi5NnVz-4xwyN8tYvrwFD9K75bekyIAKwAHu', 'HzQt8U0ytJ9MoDkJQSAG44pMlkz167ClN8V9AuR4AwPI_P_WbdJdPgG_ykj73EJg', 'HzQt8U0ytJ9MoDkJQSAG40qxE-GTKdYZTAsKxOFWDYAnN8UUsK3sVu39Ru5Wp4Is', 'HzQt8U0ytJ9MoDkJQSAG47hRdIZiF5w47RkUxmnhuSywGHGnMrY4GU9C8xnsh2bh', 'HzQt8U0ytJ9MoDkJQSAG42DXl5UFC3SJkYHMFmL6MbrI36QDYPvsAuRpDLOGQkVs', 'HzQt8U0ytJ9MoDkJQSAG46SMpLPbPNo_IA6KYUYqCNFhemKIgsaE4Do39GqxmZ40', 'HzQt8U0ytJ9MoDkJQSAG4xr323ugpjnnqKg_vZDKVyH5SthVg6SFnOVrmAbVi5NT', 'HzQt8U0ytJ9MoDkJQSAG44X8drIybJOvW0JrE9kZ98uAhbKVzP1iym-te8c8xZk6', 'HzQt8U0ytJ9MoDkJQSAG48VGe8wT7b-awBY_-rT0SChQC5NhsXys03FY7m4PkYZl', 'HzQt8U0ytJ9MoDkJQSAG46cT3TBcy5kUKhPwyoU0wtObtLytk51s-U0vcvEWgaUb', 'HzQt8U0ytJ9MoDkJQSAG4x9ZxdwM3feXEY7ssTo9JsL2KCwBOqDFguZxfJoRACY5', 'HzQt8U0ytJ9MoDkJQSAG49Y8Ac7aEXCnmQorVWk0bcD68dyED9IrtjJIzA_1rohq', 
'HzQt8U0ytJ9MoDkJQSAG405U90Vh_gCrAIRqgwSdJpCQAJrVMlgS_T99oQ48yuOu', 'HzQt8U0ytJ9MoDkJQSAG40uKMaiQfRpX0YChFYVk4qF9n6TgVfD4mWGG8o7vWjTp', 'HzQt8U0ytJ9MoDkJQSAG4x-dZex2Jm-HERkRMjXFGv61cOHmsEJRlGSWubjG_eSH', 'HzQt8U0ytJ9MoDkJQSAG452cbOOEa3frlydH6Xq5dIfKLNBL9UhbLakREt5q0NSf', 'HzQt8U0ytJ9MoDkJQSAG4wd6-EpT1vq0dH9CNaYzu6JITqX1b1YFXsi5EAOV46xf', 'HzQt8U0ytJ9MoDkJQSAG40-TcaFJYPEnDxdVc7hingNRsgID07qj5PsKJJ6hK5YX', 'HzQt8U0ytJ9MoDkJQSAG4y2zod_W3BZEcvwtmKR8DV-BlvSlg0XTNXAJqfCDEmgy', 'HzQt8U0ytJ9MoDkJQSAG4_6h8V-fJwj59lgH23Nm1eaOs64CP0Ugy3STHhTF_Ixu', 'HzQt8U0ytJ9MoDkJQSAG4_9sErXsKWnB2p1ezYo_BTbZsgcXTYn1dyAA7rjhMRol', 'HzQt8U0ytJ9MoDkJQSAG47gcDcMw7q__YhcpVcgfEuseTdei1l0zCsW6VquMZ3lN', 'HzQt8U0ytJ9MoDkJQSAG43KazB8pDtG__HfHVVrCSmTkE5nztXKO7s5fyjxIGKtJ', 'HzQt8U0ytJ9MoDkJQSAG4_3plTPHwnYPK9W3pEVSA0xYWJc16ntnkT9kL-rOyJoM', 'HzQt8U0ytJ9MoDkJQSAG4wC5uGFH1eNgiw-mX9HRoeoaIvgwKf8l1IEocCc6fVwO', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT05ah7ZtynuLukul4Tux9-s', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT1qPKvacmWRfjr1bWfNoY65', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT2pzwGOmNkXhC7lqwmrlu-w', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT259ddCdSberDZs3FbEyTGY', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT1Q9XLusnqBSigDZhsT0BS3', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT0V8jV5oZUH0KqUpKsom-ux', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT3HEz6Ha51SFzW5dTD5ztiN', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT300vjKqXbIlqDb5neswUMR', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT20nZnKsPAnmzgeaYuMWfBc', 'HzQt8U0ytJ9MoDkJQSAG40VC_Z8feUO_z-nDKeZiTT3PGc_WhkVpdjUEuOrin_h8', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L71RngW85wUn1x40Yv-yvDjK', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L725BTuG5WhZR8D3MqhBMrVU', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L71EmwUi6ZQh4g4M1fYnXcVS', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L72N4dvz2mYy4_7dOsgtRZCO', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L73BXTsw5SgQhnarfUe49ymn', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L72X2KTJjX10LHpS7E4gvQ1I', 
'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L73Z16BzFWHDt1f1mDlNKeU5', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L70B5QIw7lqHbo3ryFomxP78', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L71bJlJJRz845GHca-ANdg3I', 'HzQt8U0ytJ9MoDkJQSAG4yp60a1_sB0nzpWWGmP-L71MDc3kAyGmKKbH29LXwJ7w', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZATLnfJZ71xncCvoZFuCqb6', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZCN_lyGS2zp2sgOB_e56bHC', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZDg-eLkrBRLkNYBvWthZkAQ', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZCFI57lzIeLZR6lzqe64BcV', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZB-Sy05BLLikyjmxzZHYjMP', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZCWizScxQTQ6mK_lxxvLXZs', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZB6EQ6Nnjqy8jvZOXF0Y94T', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZD3PS2MzFFI1fuDrTHifY5g', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZANaQbs4WNEdNaZCVOkspQY', 'HzQt8U0ytJ9MoDkJQSAG4yr7mCrlb2eeMDY7wExbKZB1ovZcRNnXzCPvJVMSLc9x', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZOUp1EqmaoDtWzvYSmofT2I', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZMLFZ5rgarxw5Lw27sarigW', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZO4AfgAXlw6ke_U4JbP9vDV', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZNvISUhUifMCNTWzcmwbZXj', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZNYnauojZQaQAuh-VqCnaJ0', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZNtgkZpRmL4Ul1gm94j_-dt', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZMFx2taC1MUFyfWAKF_Bn8d', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZOnPXmVcBECEYReRv8stMcc', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZOtWOl7M_LW_K1fHazOe8Pn', 'HzQt8U0ytJ9MoDkJQSAG4yuy1dj7FMcfWR0UrSUT7ZOFzDp_PjdDMQmtLPfIItlb', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBnphcNl4ChyRds4L4RV8K-w', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBkK9Y0j3u0_NW11mNubpKxJ', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBl1sRmIEXf7IPSHTboBAgUU', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBkYAe_xixIewTydAU-gNG5X', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBkeljKIbHUzBHrUlPaWxpGG', 
'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBkSDspv5eeLrR8HNoqNaVxG', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBmUQbVBHvu31aRSlMZc9Ktq', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBme9Z4BxFhhGt5dUBrnddKu', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBlzheUFAvit5p2GlRqMwBcU', 'HzQt8U0ytJ9MoDkJQSAG46d2xKrpHJJdkMsdpSpFpBngtMqwhipEzk2Emsa8E5Jq', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpxJVxf5yQYpNaVje97QrUGh', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720Fpwh4yKzCG_4evSpOygXFYlv', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpysrsqMvM3UWr40GsMRf1S9', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpzApOMuwZQMyS2JNBFp28nl', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720Fpy-MyZfV2d5fyDEJ4MOAnG4', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpyH8zYslRyzNlb8EJoM8diD', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpyPlclFpYMTx9ZCJ0rVMhuc', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720Fpxv9x8pZuYnCqyX81zZR2JF', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpwzqNNjBNkqZN18BpLW7Urh', 'HzQt8U0ytJ9MoDkJQSAG4-EZgCg5UGJsxN9-f720FpyLvXGmQ-EHhflHd5yIX0TB', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBu310ZZzesSHHb1dRXZdKc9', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBtZuHz7v7d4OP9LrD5Fv6mv', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBuL4c_ea7mnlDLLDWraxCmx', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBsANEdraqq1r2tIt-Lhn_-1', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBuSnGZlehuRgTlRqeLr7bbI', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBsKX0liJiZBgn1y7KqonGa6', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBsq3o7-8Z8PZHzjW8lTjkVq', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBtGvTRcKSHiBtZmmI9IwKMe', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBsXEyZiCnXM6sXm8EtcU91T', 'HzQt8U0ytJ9MoDkJQSAG46j4lGgCI-7cpphVGWA5uBttSHEDbCWoljF7T5kWrHku', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3igCxC0WgOX8zHZdAVRXQFk', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3hPQnnJ5OZEz3H_5L_FriDP', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3hRXocOvyColIbsKdhYURYp', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3gj9FjpfSTg4019R49m7ywM', 
'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3iyEcqglRMmFT5Sx0BMRRGN', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3iQICxyTqVbzMvrAhLNAoOb', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3hsst9wfM1XZHUU-uArbfnm', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3j5-H0b12BKmf0Tox6Nt9yi', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3hKczxR02wOmGxlw3R9mZh2', 'HzQt8U0ytJ9MoDkJQSAG413Z6vQugJDuo1enORqUt3jXsyIlOB7jdtSRY5ttmaH3', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcYqtob7eZvWpZE6UgLPi6iD', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcb2wsG7x5tXLfluAkcHo8qS', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcb_32mAjfBz917RX22UcK3u', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcba27FRpgA6uWXd-x9YA7t_', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcbsb6tcaNKj26mzhD4-v6nd', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcbp0iYilis3hmP5gmojZk0M', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcYQvYlLcduDOsR4wvGJzQbm', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcZODKDQSpNkunmZXb5P9ign', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcYDT8DNcoc6jXK4IVO0Pje-', 'HzQt8U0ytJ9MoDkJQSAG4wsPXIamYX9nbcwsoGjodcZqcqrZOTs_ieemMkv-eSZL', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdtNRYtFVdzXpVTHYG0xjqp6', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9Qdu5zXzC9_dHOrv098Ju8ZBW', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdvMQqVwBKzEZFbwcZDSDVi1', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdtOY3xVX2dFnIYNP27_rMjU', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdvL4N_4_E1CCa2IeZ9xjshO', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdsPAM2_Xf6XsdPn8CZdPzZ4', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9Qdvs9uksgRWcutG9ilOlke_5', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdsDLRbLWW2BR1TWfRTUxWe2', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9QdswVtDmWRLmsXdfXU-J_e0K', 'HzQt8U0ytJ9MoDkJQSAG44nE0MK8bVfVO03rVJa9Qdvtyrc-h5lFyOjkGkur3uOg', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu2jyNYpv_4BJ99wAOGVLOFQ', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu3Q_Q9M2Px0HfMej-S8lXnV', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu2oT0Z0L2_rr_mekf7-O_fg', 
'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu2F7guogDm--9yD_cUXkJzH', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu0uynWcftU1rlIMYCK1lBOG', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu1KWluIRtgf_3q3WzBZBiCu', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu0jApmVhAIK8Tj5VuuXb4ly', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu10NcmAeuTpjw1qh_IjShgR', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu3dsWgQa582nfsyxdDpDZUK', 'HzQt8U0ytJ9MoDkJQSAG41n9VcewDU54tlkt9njfXu1Mruwqyc3RgY2qR4sxtM63', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqazYqYH3-qITY-SG8nlIwgK', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqY7rR5wbtRm3bayGBrndKJu', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqZ9CfADeTrvj5Geyq0qo1s_', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqbAt_CWjh9GwutTtIRKhxPy', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqZf97sUxkic8-J7UqtwMnb0', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqaxen0Su-W3_AvGcFO5frl3', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqbG3ZLdfwu5AgOicWu815cD', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqZSVvszeVDEF4t6ABF1XOaA', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqbI73FpeXc6UHF7bej5zCwB', 'HzQt8U0ytJ9MoDkJQSAG44nkdit6P9PEL1nkFWzfUqY7ihfdEz65eAtpoqnphXDJ', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ2m1KBVJzVFvFc2czm38AAh', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ1v6NVY0xkP279n_l9zZP9C', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ1-KZGDSMYejuuhxi-ThLiB', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ2IUvGafWgMleRfHvjXki8_', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ1p_ulppXjy7IAbswVNMUnN', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ2pPCpFNoNEcD0EwZHnap6x', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ27zuANLzy-WwRxQGrGvVPJ', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ1OCZqtrP-FEWo5B2cFHs7s', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ16roQ38INddnRYw-CpSSO0', 'HzQt8U0ytJ9MoDkJQSAG416eTbwZzXwojKqm5MQ9FJ3L_M_ELvL_2qpy64bhXFY6', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUhv7kI-SNOktEN6BnL4YUgF', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUjc4agcC6SvxL-O50RE1wNS', 
'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUiUuXDRqtPqEEj17zXbID-E', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUg-kV3bZrGZXOZe8sfcMWbD', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUh6XXqg9HCHpF49Nexhkv6d', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUhqc_jH9QdCIV6KPfBc4GEN', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUjlJyAvw644vieHup0pNGT2', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUjNcpQIeLFl-Ab4ttxUaGlc', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUhSsiRHNBIYz0CghuCoJOQU', 'HzQt8U0ytJ9MoDkJQSAG4zMLfuyraiidlX7E1NnvSUiycorKbq2jI6zV3fTpl-Gf', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5F9Acu-u4oDP7VzlE2sASsA', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5ElNkJ5rr1zo4ufHT_4E6TD', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5ECQcBvULxpHZdSxANgMbgw', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5EOfaRzbCaEb7Aijp7q_bUu', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5EdQkZNODqueiXQvIHxTtrB', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5EtMePjWMWW8A6CsNELSn82', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5E89pmZggkJtSa45cpzh80b', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5HRM8EyHDPFca1509ArjAa7', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5FuzfU9YuCUWWyd5JzjwFEn', 'HzQt8U0ytJ9MoDkJQSAG4-RFEuFvZie9SBzXHLtyv5HxfEEAWa8j70LcLsZOIuv1', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcoHVx6kL7dxOy2zRTz6pMM0', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcrKPiOu15TLRkJe1uSAB2jh', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcobo9b3fHtcDpZ0bTReKPZ2', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcpcgRXF7JBQaB4S4DNCdONW', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcoWeoiwKCMztXp0eGFz64Dg', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcqe8zHz8jVc3FMGX1dVu69y', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcqEAsgwBLIu9LLuScwDGaZ2', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcqLhwNbStopNpdxUw_Fv-OZ', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcrSXK_2EPklveoMJj5SaYpG', 'HzQt8U0ytJ9MoDkJQSAG40WXhIek0_OwT0uSBmlqCcrfCme3RQmUL_1nDCBUXHTJ', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wacIk3VEvUGZ-FxU87Upd1e', 
'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wZdpxOE1kyfbNzbK-sOBOjW', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wZaVRT16NpHnGNH4mX9Ae7V', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wb32t1Nj7V481drHIXS-EAO', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wa0JJNgd1s5E7WVa9jDqr70', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4waFmcozI_12XxowfDbhfUqH', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wYYZPkjEsbhV7ZBh-KCSd9n', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4wZFYUoqyaq356V29q8mXwfs', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4waAV_G4Yw-nFA07PEy53JMg', 'HzQt8U0ytJ9MoDkJQSAG4x1AZGXHLNj46yEiwB7M4waTbK5G967x4kDI2OXM-g1K', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndAOfnUPd3kNApCbuoKvxrUT', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndAIMeGlniXQ1apSO3cfgP-2', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndDcu4MBhl_vekFAAn_CgvgM', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndDXqEwMOsyx5HcFhYz2tl39', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndBj7kDKWPGMmueF2TH6ggeJ', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndB8NrMPk6fdBF8VE9V-180r', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndB9k5fqzobm3J1JS5h1_oOY', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndArMtxUrPlcljS9djkH29_R', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndDMq_0Ish99xpwah3ic-d0g', 'HzQt8U0ytJ9MoDkJQSAG45sv36tARax2XlznpMUzndD9VXED6NKYmvAwCbrgp_2m', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGPim5uvKPBB43T8c-2tpfiR', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGOTszLxbYSHSk2dDZy9y7Dl', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGNVR9oRQz10HaFb3cw8SNRm', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGMZQVfzaBVBzh1Qx_93P-SM', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGOeql868rU0a2xLR8o1XM5Z', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGP-pSufXW9931pSSY7sy_48', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGOWev_XBw6s4HH3g68yTHda', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGO0bRi6zRIVjk1ZVrMcCT2o', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGOuUqjqac8YocUxg5g-_zbz', 'HzQt8U0ytJ9MoDkJQSAG46BQawiCFA4bWcfBWtuobGNbDI0L-8TE6Ge_oQyQsudN', 
'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ_Pi2_Uyl_B7hiFRqWVC-d9', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ-cjeMnFc2ihp5aD3VwqWd-', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ-Oevj8McPruwsQqu1WzqG6', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ9U7RoMsp13w9FksM4X_d6Y', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ8tImdn9Q_Z7k8mNtm-MJ98', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ-8K9Jh3FV-RENCvIPy3_ld', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ_I_5DMJGiOveiyyjybdwOn', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ-goyyNYH_T7ropmoSyn9iE', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ-kx8VGY2MahAy6Sd6T_mrA', 'HzQt8U0ytJ9MoDkJQSAG4yFr1NGOzq55RGfjIUFtCQ9as1dXNhNBAX30j04w2P7k', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhtgPRoc8jnzVHjKOq6Rdn6M', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhuSzE7cxm6YxN3J0z-wmemc', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhv3yVelxitf4j4v7Y1uSiZz', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhuk_CRryzeYMZrQYcteZBlE', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhujXYxl3OBBFhhqEWL81gaR', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhvHc34Lm6C9C5nioKeoK7nt', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhsuu6h2EipHCVh-m7bNLxUz', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhvAXltkgUiLDaFdFugA6Td3', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhtZFI85UKN28vuCkvvGdoJ3', 'HzQt8U0ytJ9MoDkJQSAG4xV11e6YjiZE78Kp3wetUhuNvk_3SKwmsARNwtPGzdxY', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KnQJH4TYtoiBOrFi6IH7zTS', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KlbA6s1qYuf_wabiM_6gl5v', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KlaXp567G0dydc834uwRR19', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KkdAMhENHNABT-A5hE51vz8', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KmbUhItS-cVYoKyPcex_JsN', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2Kl5o1Er_AGgwrrZXmDxGFHR', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KljKnjHj6PxNNZPSW-1p7QS', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2Kn6HZQhlXUYUfi235TvsfbA', 'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KmuHmq3QNtgWPANRmloNpyQ', 
'HzQt8U0ytJ9MoDkJQSAG4wk2OW4mV0ci_JBNswpR2KmB-hkf7uAmIcaPc5TTBGLG', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0aut6TjlFV6VctvijOp-bEYn', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0at1Bo7wOdDK-LMiBxaZg_qF', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0au7wBs5_kESor1XP9LaKta7', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0av1Mx_xbRzK35Cuqr-smcFc', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0auBLeT70JLcT9pDZOdcjfmK', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0auWP1ml_WKbBpyhqFCSgTKi', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0atMIXuVPa0z_EYK-yIlfusO', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0atqbnYnlKbq6NPnMRnlhP_n', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0avavIsIfivTsdS9VXFFIaZ6', 'HzQt8U0ytJ9MoDkJQSAG4xNP4S8n-NyZoyxYR4xP0at5juGOmY4hDN6L4ziiMHJV', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh0OnANzjE0hWlixRCCX6OMU', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh2O9BqxurnB325_-8enw517', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh1XmJJDUWmjUGPWV_PptFze', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh2kaNUW47mpfuP_mIV3310a', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh0W03LqihEZ1KECmdhzqRCL', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh3etULiD909xO7fAuctORg0', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh2I_0fywbxaLSJP7Ud9KeOr', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh3-8rpCNLw0WjwoUcKjUiEL', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh39YocHA5VRHRswgHijxOrS', 'HzQt8U0ytJ9MoDkJQSAG442yW7fEhBcKEt6-kcTnQh2G18WWiSjUX0ZxhugGAosf', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3ONZqRrGifMgNcZXtQOkk7l', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3O-gzdzWCEswNyjHihAwtd8', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3P6qDgCYcRr7lHVk9n7XUW2', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3MF3anGlRBAlDYvSiaA3Iep', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3OrPngG8XrrGOQG6Es3Z3Ah', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3OhH_U_XElbX7SafXBqx_F-', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3MxLub7iuv1E_qbeXv6GZub', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3NHhmntnipb_pALXQhVuXES', 
'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3Mi65BQaSjkHV-LmboHCvBZ', 'HzQt8U0ytJ9MoDkJQSAG46260-rAuN9YvCPc0b9SB3O803fHa4Kr-KmFeeNYaZpV', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbJzyHRVEUYqkC5H02xWIIV5', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbJS_uUUsaOYTpxN9k3nT8A1', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbIRFZchmk7ZaBqfw-9TLxHd', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbLcRVcjGeYmVpKZhe7IGyiH', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbL2YRQiWkrKNU_BUlVNZ5WV', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbLDFT0TIzl9y6Cd-48zNcEg', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbKzWWrc14SpPbZ6vHui7IdB', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbItBSQ3Pn-pKvzMhTOVnuop', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbIm73i82dFK5ih78mB2FKye', 'HzQt8U0ytJ9MoDkJQSAG45y5sdeaYBu_MhqOjlFxAbIAatcV1FRqfR2INpl0gh9n', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw82_fP57Czspnm4OykEko48P', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw81UjDEqEmoSLWi-yLRRaCV6', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw830OS1E6wGGhzCFCT4m8pB6', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw80nxqn5G6VrA_KwGVsIbLqX', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw83aJSxPA8dFZI99us3m1Ikk', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw83Hx3nPZ5dAMjfwaOKQ8gPr', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw81tvIabPf8MEgPoGchNVKf9', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw811XaNKNb5W-LjS5z_UH6GP', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw80OwZiY66uRLOkmge0PoIGf', 'HzQt8U0ytJ9MoDkJQSAG41iH7brfIJhGKTsl3O6kw80QhrY_NVQGBjhZLmdc0HUC', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuSNAwZgQ9AOGcck__EO4kAH', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuRQti-AtKyU3tT5r110h071', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuQxo3GnVxAZ--5DnSNxcYhU', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuRiZGAWMAkxWtY-s8ryUsb6', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuQpL4-5VlQyIOY_2-Q04h2S', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuSpjg5QrphyYa2X-s2rFUch', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuREaY9HRNd2vqWw5gFoB6JE', 
'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuT-lAaH3K8f23TAVnwNgkcv', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuT76feh7D8wrMz7hieiLZoS', 'HzQt8U0ytJ9MoDkJQSAG4-jMmsGAud5hQmEkHI7HCuQx3YbBTwTPGdi6IHgw6ppM', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhWc_FHJBJfW-PNRAmE03YKC', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhXhOAqUkn9MN3RapXZBGF8D', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhUw0FmHXX6DygFZc5NiuGTE', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhVTtcCu6pU222hHU3NVJA0j', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhVm6cGTWVTyZu4yUnZNug41', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhUpaB7ScqYUxsgjNi32uXdo', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhWFi3jiq_Do8j8swfMijVf-', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhUWNonvK9xRXtr0lVq7NtLi', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhUzlJr9LhW_Be3S1jgo6FQI', 'HzQt8U0ytJ9MoDkJQSAG45w-lkQJ6I1Bogrla_YCVhUb6yJFAucft5GqHp_luJz8', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tIBW-CURhuTL7EcQtX9ZlGa', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tK9Ri9saasSuDRy9kddFjJs', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tKSYzdzkxrKXwnS6qGE6YYh', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tKwUIbW-I6df8fh_2TTYt3Z', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tJ6rAXJH5UNRN5ucfRLivx1', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tI7h1roHf24PmrxuvGXjRiS', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tI8xe3N8-RV7eD4_arWNLqi', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tIkvvTLkqhibegxr4Fi9xho', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tIl2pqhM0ukElAlrry59KCL', 'HzQt8U0ytJ9MoDkJQSAG4z7YaO0Ae4mKNU2B2QPq4tI92LqT60tw0w_zzHa9A2TD', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfhB_X_kgEV-ko8e3mq4n1a_', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfj4-bq8TTTNbNKq90i6Ko6h', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfhxhI0-vVCwYVa3rAhUezMD', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfjHgUIFJOz5IV0fx8XtoO92', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfj6avS5N9D0v6g49r-a99a9', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfhLZndF3JsCu31WK6KaPeRl', 
'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfh6QyBvzcz3Zi3hMJ2fy8is', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfgIxqVj0En4zdYw-_koFxKO', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfg7BTZslzIo3UPEguVKwVcL', 'HzQt8U0ytJ9MoDkJQSAG40KMEbeT0TQwhDI8zGtPwfjKAJtIGp5jclCfmqiuDWaE', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUCuETvil9KMa35XmOe3x-xv', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUB8bzdWn2BYPol6OMVo7fSv', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUBcn1CwIG7dHF9gveD7ZWO8', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUBz_oXq063wjJAOR2Di593K', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUANsCZfdocuqqpCLvXyBcj0', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUDScUxAhfyYiCaHxqeKhNsr', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUDaSF4t2jtzN-dXMVN4k14e', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUDVm-oqWjztwVUIDw2fka7T', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUBcAuf5dbgPipPZfH8bNLp5', 'HzQt8U0ytJ9MoDkJQSAG40ZqndhuLEvULxmuIWDDUUCMA8WuviQHF5ZpzD5XJLWO', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbtgUARw3iZsov2iS1qFLfj3', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbuaxSI9KaGwTy426OA_4DMe', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbsk1gs1bwSWsxFgnQLGOtoX', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbuvbnjukXmHp4HJc7pdJq-7', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbtcTSu5ZKwPtWMBs5bmv-XP', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbvcv6d2HguOMjvTkl0e1YV3', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbtfr7r2cnmO9svW34Fl6INM', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbvy6WDmUg2gRChN96f0iBQm', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbvMREQ5xvPVQSLE4W-_ftHG', 'HzQt8U0ytJ9MoDkJQSAG43AGYw0_tYAqC3IYHNsBtbtDLAUCUDoXm_ntsfj0ZvdD', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykP1-Jn8C2IQZqKcGBvvAZ40', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykN1SQttPaA8z7KL7f2re5B1', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykPDgSvT0gz_8iqzcgbZRj_B', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykNgEzR7GR1uyq2WhalgyPGl', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykOi16zyS88b0-gaz5X7lWub', 
'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykMsvkNI7sZATsd2E4bABzlI', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykNmlpK-eRmS7qGfqqyB79pI', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykPmmvqp3P6EWU1Mhu6z7h-a', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykPBj3_HeFjGdvXMD_MK0Fm3', 'HzQt8U0ytJ9MoDkJQSAG420TDcZTwY1wej9rClq3ykPDvZxmCq-8RKJIuS4crO8q', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU_6oz7HVogynb0Hzt7xlJfG', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU-dYEDore6JVxV_BW4pajuB', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU8LEbplNZWxqa3VXoQ9XhYc', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU-xf_tNcnF56hYghwMfRUTR', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU8dgrl6jR9pWDscmikSmfhP', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU_Sw6d52jGwmNM0t_3PaHQH', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU80yPoFc232KTWfMgU2Obqq', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU-ONa5WU8cG-mX5OyV6Id6P', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU_-vA3fYdsH-fppIiWWbHEL', 'HzQt8U0ytJ9MoDkJQSAG48aFsih9dFwsdbWKoE8jgU_bbpPIBXO5pXx6DvZjqoSN', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc9jLTrm4CFehFQOUspzuOiw', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc-D0NEFQahWYy1_9J1awH75', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc8w_eokFnEyA1JcXKkIszOq', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc9P50GFPHDH8DL_UEKxorNw', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc_LqPw-UVqsveEoLsetc28P', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc89C30TKoIK59DfwIXKUFH7', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc99eGbgFK2zzMx_VNfaeyrV', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc8t3Oz7uqWNopp-c01Whork', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc8zbo_peRM98JifQ19SiYNk', 'HzQt8U0ytJ9MoDkJQSAG42adNJrIti7DAnJ6B956Nc-lWitTBjv20AW0FZbpbIqS', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyoFoYSc1uTy_LeVNHAjR99l', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyqXvkUQL5p4PMLE3I4Vd-V0', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyqJTzq1M3JClNDCe61y_Nbe', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRypfdQq374nZ2BKa8n7vAynH', 
'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyp3Laf_hZ3dIAyGgY-maCA2', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyp8NXKE2FJnQuXwS9CHj63Q', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyoujcO5qknMYQM-LA9NI-e9', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyqOBU09_YavcKZZMGe-EIXY', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRypieTO_BDd69qsPXoY8F2gZ', 'HzQt8U0ytJ9MoDkJQSAG480Qm6o2FjU0_9ylZGBTRyoFZ9gw6PgpqBEN42YBcLNm', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-J0enPMxsREDclZ1Rlo0-YA', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-LZUL-fHYG6SJ1OrkWSwT7U', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-LK43ESxL9xyd5M0xsw9sRD', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-IQlX5yQPx97kIXoMGNRRgQ', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-IF5_I1OcRQwqt9D4FIKFfg', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-LfQcv8ZB6gdqxjnLDsJedL', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-KZVEXOiVLhpi8GowvqGhuJ', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-KITcb24J3j44sAg6GRZGKX', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-LgPf99TFwbJziQkDhHjWfB', 'HzQt8U0ytJ9MoDkJQSAG444o-KbvGp4-_OuHHgYt0-K19qdDey3W7IrlVzcRg1nF', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg7sa_sGIp4Tr3U4PXvi9GS-', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg6MifsyAMVbPMRTQnT_hcWP', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg5bC2n8syNSyDdEtlshyBaF', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg6wVTZXNBfLhZW392Xdvfk6', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg7qQ78-2a4R9YXHyeQoB-jI', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg5MUvO5LBxIJaThxSB7sSw0', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg7Mw6wnNAsX91G6EkjgQsGV', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg5fTcYulE5WTpz3sEdSPckK', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg6fpk-CZ-msM0nl494fyarV', 'HzQt8U0ytJ9MoDkJQSAG4w3CFGDS2o9_9VFJqI5Ygg67H2Bey-EbkUQxfBtYPqat', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdAcrpBQcuBU7WZn3iHGPVmd', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdCwN4CkM0kJAFuPco96eRT2', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdD_zEpQy7YB6DrNwDppFhKl', 
'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdDDR4bFFCusgaEybF7lMZBG', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdB70ySWDpVSTHnHHkzrzr9h', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdAbIya5m8PXq_tEw-nxqm1s', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdA7gLCjc83vaDo5m6Y4l9-F', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdDbYJMF0W5Hn3pPNiyMlZLH', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdC5yMq8whk6vQEHsFPaNQp6', 'HzQt8U0ytJ9MoDkJQSAG488pkdGaeKyUVsAlLaXzgdDbwWOOltixp0G3bkvaQhOu']
def generate(context):
token = tokens[context['participant'].id % len(tokens)]
return TaskStatement(TITLE, STATEMENT.format(token=token))
| 1,836.368421
| 34,009
| 0.927402
| 1,244
| 34,891
| 25.754019
| 0.7709
| 0.0103
| 0.010644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165626
| 0.017626
| 34,891
| 18
| 34,010
| 1,938.388889
| 0.769051
| 0
| 0
| 0
| 0
| 0.25
| 0.937577
| 0.918317
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1141330cc1bb597a77b364aba3a5c2cfbed9b633
| 26
|
py
|
Python
|
src/exts/storage/__init__.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
src/exts/storage/__init__.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
src/exts/storage/__init__.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
from .main import Storage
| 13
| 25
| 0.807692
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3a0cc9b48789c9619fb42ce7294ca0fb422aa73f
| 325
|
py
|
Python
|
react_comments_django/context_processors.py
|
studyhub-co/react-comments-django
|
00a69da4197a1f641bf520828b34c193b3c2f5a9
|
[
"Apache-2.0"
] | null | null | null |
react_comments_django/context_processors.py
|
studyhub-co/react-comments-django
|
00a69da4197a1f641bf520828b34c193b3c2f5a9
|
[
"Apache-2.0"
] | 6
|
2021-04-22T08:54:19.000Z
|
2022-02-10T08:07:44.000Z
|
react_comments_django/context_processors.py
|
physics-is-beautiful/react-comments-django
|
00a69da4197a1f641bf520828b34c193b3c2f5a9
|
[
"Apache-2.0"
] | 1
|
2021-07-15T02:37:12.000Z
|
2021-07-15T02:37:12.000Z
|
# from django.conf import settings
#
#
# def react_comments_django_settings(request):
# if hasattr(settings, 'REACT_COMMENTS_DJANGO_BASE_TEMPLATE'):
# return dict(BASE_TEMPLATE=settings.REACT_COMMENTS_DJANGO_BASE_TEMPLATE)
# else:
# return dict(BASE_TEMPLATE='react-comments-django/react_index.html')
| 36.111111
| 81
| 0.76
| 40
| 325
| 5.825
| 0.45
| 0.223176
| 0.32618
| 0.23176
| 0.334764
| 0.334764
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141538
| 325
| 8
| 82
| 40.625
| 0.835125
| 0.947692
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
28ab7dd4bb1bd35201da9dea4b4e7dc9cb81ae98
| 164
|
py
|
Python
|
borgweb/views/index.py
|
audeoudh/borgweb
|
58bb2d97c58c5e78da723f42967d8f9c752b8f5d
|
[
"BSD-3-Clause"
] | 331
|
2015-06-15T09:31:38.000Z
|
2022-03-24T18:10:50.000Z
|
borgweb/views/index.py
|
audeoudh/borgweb
|
58bb2d97c58c5e78da723f42967d8f9c752b8f5d
|
[
"BSD-3-Clause"
] | 91
|
2015-06-15T20:16:19.000Z
|
2022-03-09T19:24:22.000Z
|
borgweb/views/index.py
|
audeoudh/borgweb
|
58bb2d97c58c5e78da723f42967d8f9c752b8f5d
|
[
"BSD-3-Clause"
] | 62
|
2015-06-15T09:31:46.000Z
|
2022-02-27T03:51:28.000Z
|
"""
index / main view
"""
from flask import render_template
from . import blueprint
@blueprint.route('/')
def index():
return render_template('index.html')
| 12.615385
| 40
| 0.695122
| 20
| 164
| 5.6
| 0.65
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164634
| 164
| 12
| 41
| 13.666667
| 0.817518
| 0.103659
| 0
| 0
| 0
| 0
| 0.079137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0.4
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
28ba174daebd28fb586e4cad71d48abefc7a9353
| 142
|
py
|
Python
|
dataset/__init__.py
|
rcap107/holoclean
|
d4f5929a8e4d92d4f41eb058c04c96cdcb0af767
|
[
"Apache-2.0"
] | 468
|
2018-11-11T15:40:12.000Z
|
2022-03-30T13:21:48.000Z
|
dataset/__init__.py
|
rcap107/holoclean
|
d4f5929a8e4d92d4f41eb058c04c96cdcb0af767
|
[
"Apache-2.0"
] | 43
|
2018-11-10T20:03:49.000Z
|
2020-10-20T16:39:03.000Z
|
dataset/__init__.py
|
rcap107/holoclean
|
d4f5929a8e4d92d4f41eb058c04c96cdcb0af767
|
[
"Apache-2.0"
] | 118
|
2018-11-12T19:11:42.000Z
|
2022-03-23T18:25:29.000Z
|
from .dataset import Dataset
from .dataset import AuxTables
from .dataset import CellStatus
__all__ = ['Dataset', 'AuxTables', 'CellStatus']
| 23.666667
| 48
| 0.774648
| 16
| 142
| 6.625
| 0.375
| 0.311321
| 0.481132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126761
| 142
| 5
| 49
| 28.4
| 0.854839
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
28d527b44406edeef9a67426835fb33e89b3421b
| 88
|
py
|
Python
|
fixtures/gotodef/approximate-resource-imports/gotodeflib2.py
|
gliviu/hyperclick-robot-framework
|
ff76a2c07829c4a0b12856e4925c8f9f8d741385
|
[
"MIT"
] | 3
|
2017-02-18T11:55:59.000Z
|
2020-02-03T18:02:03.000Z
|
fixtures/gotodef/approximate-resource-imports/gotodeflib2.py
|
gliviu/hyperclick-robot-framework
|
ff76a2c07829c4a0b12856e4925c8f9f8d741385
|
[
"MIT"
] | 4
|
2017-02-13T13:18:02.000Z
|
2020-08-14T16:26:13.000Z
|
fixtures/gotodef/approximate-resource-imports/gotodeflib2.py
|
gliviu/hyperclick-robot-framework
|
ff76a2c07829c4a0b12856e4925c8f9f8d741385
|
[
"MIT"
] | null | null | null |
def impkw():
print('impkw2')
def my_third_keyword():
print('my_third_keyword2')
| 17.6
| 30
| 0.681818
| 12
| 88
| 4.666667
| 0.666667
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.159091
| 88
| 4
| 31
| 22
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0.261364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e93ac497fc091e3c82e2ab1911a26325c3b2e250
| 34
|
py
|
Python
|
dtuf/__main__.py
|
davedoesdev/dtuf
|
590f42e8ccee1b3f02af153fba34b0ba3b9b7850
|
[
"MIT"
] | 13
|
2016-01-05T01:48:01.000Z
|
2022-02-20T14:53:04.000Z
|
dtuf/__main__.py
|
davedoesdev/dtuf
|
590f42e8ccee1b3f02af153fba34b0ba3b9b7850
|
[
"MIT"
] | 3
|
2015-12-10T21:32:22.000Z
|
2016-03-09T22:38:02.000Z
|
dtuf/__main__.py
|
davedoesdev/dtuf
|
590f42e8ccee1b3f02af153fba34b0ba3b9b7850
|
[
"MIT"
] | 2
|
2018-02-02T21:29:08.000Z
|
2020-05-27T10:50:35.000Z
|
import dtuf.main
dtuf.main.main()
| 11.333333
| 16
| 0.764706
| 6
| 34
| 4.333333
| 0.5
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 2
| 17
| 17
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
aaeb6e7029cb574f5d29091c0ccd8c4ad17b8148
| 137
|
py
|
Python
|
src/base/__init__.py
|
shaliniiit/CVDD-PyTorch
|
c07e1bd24fad81c1a1c51a70d90474b333d19f57
|
[
"MIT"
] | 48
|
2019-07-30T12:34:41.000Z
|
2022-02-23T10:56:42.000Z
|
src/base/__init__.py
|
Wuliyuanulb/CVDD-PyTorch
|
aa2b033ed8216ce132ef6977da1e4fae665fb0c0
|
[
"MIT"
] | 4
|
2019-11-28T14:26:38.000Z
|
2021-11-16T14:53:17.000Z
|
src/base/__init__.py
|
Wuliyuanulb/CVDD-PyTorch
|
aa2b033ed8216ce132ef6977da1e4fae665fb0c0
|
[
"MIT"
] | 19
|
2019-07-30T02:44:57.000Z
|
2022-02-02T00:39:13.000Z
|
from .base_dataset import *
from .torchnlp_dataset import *
from .base_net import *
from .base_trainer import *
from .embedding import *
| 22.833333
| 31
| 0.781022
| 19
| 137
| 5.421053
| 0.421053
| 0.38835
| 0.330097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145985
| 137
| 5
| 32
| 27.4
| 0.880342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
aaebb1ad92f7af47a7d010a1f197d41ab01d5438
| 40
|
py
|
Python
|
fftbg/bird/msg_types.py
|
rainbowbismuth/birb-brains-bot
|
f168ec06c5c5cc8d41589437c6f91f0d97289167
|
[
"MIT"
] | 1
|
2020-12-01T01:31:31.000Z
|
2020-12-01T01:31:31.000Z
|
fftbg/bird/msg_types.py
|
rainbowbismuth/birb-brains-bot
|
f168ec06c5c5cc8d41589437c6f91f0d97289167
|
[
"MIT"
] | 2
|
2021-05-30T21:10:16.000Z
|
2021-05-30T21:10:44.000Z
|
fftbg/bird/msg_types.py
|
rainbowbismuth/birb-brains-bot
|
f168ec06c5c5cc8d41589437c6f91f0d97289167
|
[
"MIT"
] | null | null | null |
BIRD_GOING_ALL_IN = 'BIRD_GOING_ALL_IN'
| 20
| 39
| 0.85
| 8
| 40
| 3.5
| 0.5
| 0.642857
| 0.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0.425
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c923c7d31b4c0467042ba3aa9347bf83a94695a6
| 16,376
|
py
|
Python
|
data.py
|
IVRL/FG-NIC
|
b2338f5dfd10883150fc415d149b1f080e5a344d
|
[
"MIT"
] | 2
|
2021-05-31T22:46:12.000Z
|
2021-06-01T01:24:41.000Z
|
data.py
|
IVRL/FG-NIC
|
b2338f5dfd10883150fc415d149b1f080e5a344d
|
[
"MIT"
] | null | null | null |
data.py
|
IVRL/FG-NIC
|
b2338f5dfd10883150fc415d149b1f080e5a344d
|
[
"MIT"
] | 2
|
2021-12-13T08:57:18.000Z
|
2021-12-29T06:38:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : FG-NIC
# @Author : Xiaoyu LIN
# @File : data.py
# @Description : This file is used to genterate Pytorch dataset for caltech-256 and caltech-101.
from PIL import Image
from typing import Any, Callable, List, Optional, Union, Tuple
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import check_integrity, verify_str_arg
import copy
import gdown
import pickle
import random
import os
import tarfile
class Caltech256(VisionDataset):
""" Caltech 256 Dataset.
Args:
root (string): Root directory of dataset where directory
``caltech256`` exists or will be saved to if download is set to True.
phase (string): ['train', 'valid', 'test'] load data for different phase.
is_return_origin (bool): If true, return target is label for classification,
if false, return target both label and the original image for restoration.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
train_size (int): The number of images in train and validation set per class.
valid_ratio (float): The ratio of validation image in train and validation set per class.
"""
def __init__(self,
root: str,
phase: str = 'train',
is_return_origin: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
train_size: int = 60,
valid_ratio: float = 0.2,
) -> None:
super(Caltech256, self).__init__(root,
transform=transform,
target_transform=target_transform)
os.makedirs(self.root, exist_ok=True)
self.is_return_origin = is_return_origin
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories")))
# check previous train and validation indices
if os.path.isfile(os.path.join(self.root, 'train_dic.pickle')) and os.path.isfile(
os.path.join(self.root, 'valid_dic.pickle')):
with open(os.path.join(self.root, 'train_dic.pickle'), 'rb') as file:
train_dic = pickle.load(file)
with open(os.path.join(self.root, 'valid_dic.pickle'), 'rb') as file:
valid_dic = pickle.load(file)
# if no previous train and validation indices, sample train and validation data
else:
train_dic = {}
valid_dic = {}
for c in self.categories:
fileslist = os.listdir(os.path.join(self.root, "256_ObjectCategories", c))
n = len(list(filter(lambda file: file.endswith(".jpg"), fileslist)))
# select 60 images randomly as training images per class
train_index = random.sample(range(1, n + 1), k=train_size)
valid_index = random.sample(train_index, k=int(train_size * valid_ratio))
train_index = list(set(train_index).difference(set(valid_index)))
train_dic[c] = train_index
valid_dic[c] = valid_index
with open(os.path.join(self.root, 'train_dic.pickle'), 'wb') as file:
pickle.dump(train_dic, file)
with open(os.path.join(self.root, 'valid_dic.pickle'), 'wb') as file:
pickle.dump(valid_dic, file)
# generate new index, label(y), and map (between label number and text label)
self.index: List[int] = []
self.y = []
self.map = {}
for (i, c) in enumerate(self.categories):
if 'train' in phase.lower():
self.index.extend(train_dic[c])
self.y.extend(len(train_dic[c]) * [i])
if 'valid' in phase.lower():
self.index.extend(valid_dic[c])
self.y.extend(len(valid_dic[c]) * [i])
if 'test' in phase.lower():
fileslist = os.listdir(os.path.join(self.root, "256_ObjectCategories", c))
n = len(list(filter(lambda file: file.endswith(".jpg"), fileslist)))
self.index.extend(
list(set(range(1, n + 1)).difference(set(train_dic[c])).difference(set(valid_dic[c]))))
self.y.extend((n - train_size) * [i])
self.map[i] = c.split('.')[-1]
def __getitem__(self,
index: int
) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class for classification task
or the same image for restoration task.
"""
img = Image.open(os.path.join(self.root,
"256_ObjectCategories",
self.categories[self.y[index]],
"{:03d}_{:04d}.jpg".format(self.y[index] + 1, self.index[index])))
if img.mode != 'RGB':
img = img.convert('RGB')
origin = copy.deepcopy(img)
target = self.y[index]
if self.is_return_origin and self.transform is not None:
img, origin = self.transform(img)
elif self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.is_return_origin:
return img, origin, target,
else:
return img, target,
def _check_integrity(self) -> bool:
# can be more robust and check hash of files
return os.path.exists(os.path.join(self.root, "256_ObjectCategories"))
def __len__(self) -> int:
return len(self.index)
def download(self) -> None:
if self._check_integrity():
print('Files already downloaded and verified')
return
download_root = self.root
extract_root = download_root
filename = "256_ObjectCategories.tar"
url = "https://drive.google.com/uc?id=1r6o0pSROcV1_VwT4oSjA2FBUSCWGuxLK"
archive = os.path.join(download_root, filename)
gdown.download(url, archive, quiet=False)
# extract file
print("Extracting {} to {}".format(archive, extract_root))
cwd = os.getcwd()
tar = tarfile.open(archive, "r")
os.chdir(extract_root)
tar.extractall()
tar.close()
os.chdir(cwd)
print("Extraction done!")
class Caltech101(VisionDataset):
    """`Caltech 101 <http://www.vision.caltech.edu/Image_Datasets/Caltech101/>`_ Dataset.
    .. warning::
        This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
    Args:
        root (string): Root directory of dataset where directory
            ``caltech101`` exists or will be saved to if download is set to True.
        phase (string, optional): Which split to expose; matched by substring,
            case-insensitively, against ``train``/``valid``/``test``.
        is_return_origin (bool, optional): If True, ``__getitem__`` also returns
            an untransformed copy of the image.
        target_type (string or list, optional): Type of target to use, ``category`` or
            ``annotation``. Can also be a list to output a tuple with all specified target types.
            ``category`` represents the target class, and ``annotation`` is a list of points
            from a hand-generated outline. Defaults to ``category``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        train_size (int, optional): Images sampled per class for the combined
            train+valid pool. Defaults to 30.
        valid_ratio (float, optional): Fraction of that pool held out for
            validation. Defaults to 0.2.
    """
    def __init__(self,
                 root: str,
                 phase: str = 'train',
                 is_return_origin: bool = True,
                 target_type: Union[List[str], str] = "category",
                 transform: Optional[Callable] = None,
                 target_transform: Optional[Callable] = None,
                 download: bool = False,
                 train_size: int = 30,
                 valid_ratio: float = 0.2,
                 ) -> None:
        super(Caltech101, self).__init__(root,
                                         transform=transform,
                                         target_transform=target_transform)
        os.makedirs(self.root, exist_ok=True)
        self.is_return_origin = is_return_origin
        if not isinstance(target_type, list):
            target_type = [target_type]
        self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation"))
                            for t in target_type]
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories")))
        self.categories.remove("BACKGROUND_Google")  # this is not a real class
        # For some reason, the category names in "101_ObjectCategories" and
        # "Annotations" do not always match. This is a manual map between the
        # two. Defaults to using same name, since most names are fine.
        name_map = {"Faces": "Faces_2",
                    "Faces_easy": "Faces_3",
                    "Motorbikes": "Motorbikes_16",
                    "airplanes": "Airplanes_Side_2"}
        self.annotation_categories = list(map(lambda x: name_map[x] if x in name_map else x, self.categories))
        # NOTE: the original code built self.index/self.y here from all files
        # and then unconditionally rebuilt them below; that dead first pass has
        # been removed.
        # Reuse persisted split indices when available so train/valid/test
        # partitions stay stable across runs.
        train_pickle = os.path.join(self.root, 'train_dic.pickle')
        valid_pickle = os.path.join(self.root, 'valid_dic.pickle')
        if os.path.isfile(train_pickle) and os.path.isfile(valid_pickle):
            with open(train_pickle, 'rb') as file:
                train_dic = pickle.load(file)
            with open(valid_pickle, 'rb') as file:
                valid_dic = pickle.load(file)
        # if no previous train and validation indices, sample train and validation data
        else:
            train_dic = {}
            valid_dic = {}
            for c in self.categories:
                fileslist = os.listdir(os.path.join(self.root, "101_ObjectCategories", c))
                n = len(list(filter(lambda file: file.endswith(".jpg"), fileslist)))
                # select ``train_size`` images per class (the old comment said
                # 60, but the actual count is the train_size argument), then
                # hold out ``valid_ratio`` of them for validation
                train_index = random.sample(range(1, n + 1), k=train_size)
                valid_index = random.sample(train_index, k=int(train_size * valid_ratio))
                train_index = list(set(train_index).difference(set(valid_index)))
                train_dic[c] = train_index
                valid_dic[c] = valid_index
            with open(train_pickle, 'wb') as file:
                pickle.dump(train_dic, file)
            with open(valid_pickle, 'wb') as file:
                pickle.dump(valid_dic, file)
        # generate new index, label(y), and map (between label number and text label)
        self.index: List[int] = []
        self.y = []
        self.map = {}
        for (i, c) in enumerate(self.categories):
            if 'train' in phase.lower():
                self.index.extend(train_dic[c])
                self.y.extend(len(train_dic[c]) * [i])
            if 'valid' in phase.lower():
                self.index.extend(valid_dic[c])
                self.y.extend(len(valid_dic[c]) * [i])
            if 'test' in phase.lower():
                fileslist = os.listdir(os.path.join(self.root, "101_ObjectCategories", c))
                n = len(list(filter(lambda file: file.endswith(".jpg"), fileslist)))
                self.index.extend(
                    list(set(range(1, n + 1)).difference(set(train_dic[c])).difference(set(valid_dic[c]))))
                self.y.extend((n - train_size) * [i])
            self.map[i] = c.split('.')[-1]
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where the type of target specified by target_type,
            or (image, origin, target) when ``is_return_origin`` is True.
        """
        import scipy.io  # lazy import: only needed for 'annotation' targets
        img = Image.open(os.path.join(self.root,
                                      "101_ObjectCategories",
                                      self.categories[self.y[index]],
                                      "image_{:04d}.jpg".format(self.index[index])))
        if img.mode != 'RGB':
            img = img.convert('RGB')
        # BUG FIX: keep an untransformed copy up front. Previously ``origin``
        # was only assigned inside the transform branch, so requesting the
        # origin with transform=None raised NameError at the return statement
        # (the Caltech256 sibling class already did this correctly).
        origin = copy.deepcopy(img)
        target: Any = []
        for t in self.target_type:
            if t == "category":
                target.append(self.y[index])
            elif t == "annotation":
                data = scipy.io.loadmat(os.path.join(self.root,
                                                     "Annotations",
                                                     self.annotation_categories[self.y[index]],
                                                     "annotation_{:04d}.mat".format(self.index[index])))
                target.append(data["obj_contour"])
        target = tuple(target) if len(target) > 1 else target[0]
        if self.is_return_origin and self.transform is not None:
            img, origin = self.transform(img)
        elif self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.is_return_origin:
            return img, origin, target
        else:
            return img, target
    def _check_integrity(self) -> bool:
        # can be more robust and check hash of files
        return os.path.exists(os.path.join(self.root, "101_ObjectCategories"))
    def __len__(self) -> int:
        """Number of samples selected for the current phase."""
        return len(self.index)
    def download(self) -> None:
        """Download and extract the Caltech-101 archive into ``self.root``.

        No-op when the dataset directory is already present.
        """
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_root = self.root
        extract_root = download_root
        filename = "101_ObjectCategories.tar"
        url = "https://drive.google.com/uc?id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp"
        archive = os.path.join(download_root, filename)
        gdown.download(url, archive, quiet=False)
        # FIX: context manager + ``path=`` keep the archive handle closed and
        # avoid mutating the process-wide cwd (old chdir was not exception-safe).
        print("Extracting {} to {}".format(archive, extract_root))
        with tarfile.open(archive, "r") as tar:
            tar.extractall(path=extract_root)
        print("Extraction done!")
    def extra_repr(self) -> str:
        return "Target type: {target_type}".format(**self.__dict__)
| 45.112948
| 111
| 0.553249
| 1,905
| 16,376
| 4.64147
| 0.152231
| 0.021715
| 0.029405
| 0.038
| 0.748699
| 0.746664
| 0.740443
| 0.72133
| 0.71658
| 0.683443
| 0
| 0.011681
| 0.341292
| 16,376
| 363
| 112
| 45.112948
| 0.80801
| 0.215132
| 0
| 0.728395
| 0
| 0
| 0.099239
| 0.005645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045267
| false
| 0
| 0.045267
| 0.020576
| 0.144033
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a33d9a1c079733143e68fd42b039458a49962730
| 1,381
|
py
|
Python
|
djangophysics/units/permissions.py
|
fmeurou/django-physics
|
0f67efa1b6bd547e0b80191e7a2624c2c971bdc0
|
[
"MIT"
] | 1
|
2021-06-15T20:51:45.000Z
|
2021-06-15T20:51:45.000Z
|
djangophysics/units/permissions.py
|
fmeurou/django-physics
|
0f67efa1b6bd547e0b80191e7a2624c2c971bdc0
|
[
"MIT"
] | null | null | null |
djangophysics/units/permissions.py
|
fmeurou/django-physics
|
0f67efa1b6bd547e0b80191e7a2624c2c971bdc0
|
[
"MIT"
] | 1
|
2021-12-01T00:01:29.000Z
|
2021-12-01T00:01:29.000Z
|
"""
Permissions for CustomUnit APIs
"""
from rest_framework import permissions
class CustomUnitObjectPermission(permissions.BasePermission):
    """
    Permissions for CustomUnit API
    """

    def has_object_permission(self, request, view, obj):
        """
        Limit creation and modification to logged-in users.

        Read-only requests and POST are open to any authenticated user;
        PUT/PATCH/DELETE additionally require ownership of the object.
        """
        user = request.user
        if not user or not user.is_authenticated:
            return False
        method = request.method
        if method in permissions.SAFE_METHODS or method == 'POST':
            return True
        return method.lower() in ['put', 'patch', 'delete'] and user == obj.user
class CustomDimensionObjectPermission(permissions.BasePermission):
    """
    Permissions for CustomDimension API
    """

    def has_object_permission(self, request, view, obj):
        """
        Limit creation and modification to logged-in users.

        Read-only requests and POST are open to any authenticated user;
        PUT/PATCH/DELETE additionally require ownership of the object.
        """
        user = request.user
        if not user or not user.is_authenticated:
            return False
        method = request.method
        if method in permissions.SAFE_METHODS or method == 'POST':
            return True
        return method.lower() in ['put', 'patch', 'delete'] and user == obj.user
| 29.382979
| 71
| 0.611875
| 145
| 1,381
| 5.765517
| 0.331034
| 0.078947
| 0.066986
| 0.093301
| 0.722488
| 0.722488
| 0.722488
| 0.722488
| 0.722488
| 0.722488
| 0
| 0
| 0.304127
| 1,381
| 47
| 72
| 29.382979
| 0.869927
| 0.144823
| 0
| 0.88
| 0
| 0
| 0.032787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.04
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a382b3dbcbcb81d02ece6aaac5616012124d63e5
| 13,022
|
py
|
Python
|
snn/core/cnn_fn.py
|
KaroliShp/pacbayes-opt
|
fa30b897fd6c3763a4bdb66a9fc9518165841c18
|
[
"Apache-2.0"
] | 20
|
2019-04-03T11:33:45.000Z
|
2022-01-16T03:30:44.000Z
|
snn/core/cnn_fn.py
|
KaroliShp/pacbayes-opt
|
fa30b897fd6c3763a4bdb66a9fc9518165841c18
|
[
"Apache-2.0"
] | 3
|
2020-05-06T09:22:12.000Z
|
2021-12-07T17:46:07.000Z
|
snn/core/cnn_fn.py
|
KaroliShp/pacbayes-opt
|
fa30b897fd6c3763a4bdb66a9fc9518165841c18
|
[
"Apache-2.0"
] | 8
|
2019-06-10T08:16:45.000Z
|
2021-12-05T16:50:49.000Z
|
from __future__ import division, print_function, unicode_literals
import functools
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR) # Remove tf warnings
import numpy as np
from time import time
import os, shutil, random
# Number of CIFAR-10 output classes.
NUM_CLASSES = 10
def CNN_withnoise(images, param_placeholders, scopes_list,layers,params_mean_values, graph=tf.Graph(), trainable=True):
    """Build the CIFAR-10 CNN with additive-noise placeholders on every parameter.

    Each variable is initialized from ``params_mean_values`` and the matching
    entry of ``param_placeholders`` is added to it, so feeding values through
    the placeholders perturbs the network around its mean weights.

    Args:
        images: Input image batch tensor.
        param_placeholders: Sequence of 10 tensors added element-wise to the
            corresponding variable (kernel/bias pairs, in creation order).
        scopes_list: Five variable-scope names (conv1, conv2, local3, local4,
            softmax_linear).
        layers: Unused.  NOTE(review): dead parameter — confirm callers before
            removing it from the signature.
        params_mean_values: Sequence of 10 arrays used as constant initializers.
        graph: Graph to build into.  NOTE(review): ``tf.Graph()`` as a default
            argument is evaluated once at import time, so every call relying on
            the default shares a single graph — confirm this is intended.
        trainable: Whether the created variables are trainable.

    Returns:
        (softmax_linear, param_tensor_list): logits tensor and the created
        variables in creation order.
    """
    with graph.as_default():
        param_tensor_list = []
        # conv1: 5x5x3 -> 64 feature maps, noise added to kernel and bias
        with tf.variable_scope(scopes_list[0]) as scope:
            kernel = variable_initializer('weights',
                                          [5, 5, 3, 64],
                                          tf.constant_initializer(params_mean_values[0]), trainable = trainable)
            conv = tf.nn.conv2d(images, kernel+param_placeholders[0], [1, 1, 1, 1], padding='SAME')
            biases = variable_initializer('biases', [64], tf.constant_initializer(params_mean_values[1]), trainable = trainable)
            pre_activation = tf.nn.bias_add(conv, biases+param_placeholders[1])
            conv1 = tf.nn.relu(pre_activation, name=scope.name)
            param_tensor_list.append(kernel)
            param_tensor_list.append(biases)
        # pool1
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pool1')
        # norm1
        norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                          name='norm1')
        # conv2
        with tf.variable_scope(scopes_list[1]) as scope:
            kernel = variable_initializer('weights',
                                          [5, 5, 64, 64],
                                          tf.constant_initializer(params_mean_values[2]), trainable = trainable)
            conv = tf.nn.conv2d(norm1, kernel+param_placeholders[2], [1, 1, 1, 1], padding='SAME')
            biases = variable_initializer('biases', [64], tf.constant_initializer(params_mean_values[3]), trainable = trainable)
            # NOTE(review): unlike conv1, bias_add here is also given
            # name=scope.name, so TF will uniquify one of the two op names.
            pre_activation = tf.nn.bias_add(conv, biases+param_placeholders[3], name=scope.name)
            conv2 = tf.nn.relu(pre_activation, name=scope.name)
            param_tensor_list.append(kernel)
            param_tensor_list.append(biases)
        # norm2
        norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                          name='norm2')
        # pool2
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1], padding='SAME', name='pool2')
        # local3
        with tf.variable_scope(scopes_list[2]) as scope:
            # Move everything into depth so we can perform a single matrix multiply.
            reshape = tf.reshape(pool2, [-1, 4096])
            dim = reshape.get_shape()[1].value
            weights = variable_initializer('weights', [dim, 384],
                                           tf.constant_initializer(params_mean_values[4]), trainable = trainable)
            biases = variable_initializer('biases', [384], tf.constant_initializer(params_mean_values[5]), trainable = trainable)
            local3 = tf.nn.relu(tf.matmul(reshape, weights+param_placeholders[4]) + biases+param_placeholders[5], name=scope.name)
            param_tensor_list.append(weights)
            param_tensor_list.append(biases)
        # local4
        with tf.variable_scope(scopes_list[3]) as scope:
            weights = variable_initializer('weights', [384, 192],
                                           tf.constant_initializer(params_mean_values[6]), trainable = trainable)
            biases = variable_initializer('biases', [192], tf.constant_initializer(params_mean_values[7]), trainable = trainable)
            local4 = tf.nn.relu(tf.matmul(local3, weights+param_placeholders[6]) + biases + param_placeholders[7], name=scope.name)
            param_tensor_list.append(weights)
            param_tensor_list.append(biases)
        # softmax_linear: final logits (no activation)
        with tf.variable_scope(scopes_list[4]) as scope:
            weights = variable_initializer('weights', [192, NUM_CLASSES],
                                           tf.constant_initializer(params_mean_values[8]), trainable = trainable)
            biases = variable_initializer('biases', [NUM_CLASSES],
                                          tf.constant_initializer(params_mean_values[9]), trainable = trainable)
            softmax_linear = tf.add(tf.matmul(local4, weights+param_placeholders[8]), biases+param_placeholders[9], name=scope.name)
            param_tensor_list.append(weights)
            param_tensor_list.append(biases)
        return softmax_linear, param_tensor_list
def lazy_property(function):
    """Memoize a no-argument method as a read-only property.

    The wrapped function runs at most once per instance; its result is stored
    on the instance under ``_cache_<name>`` and returned on later accesses.
    Handy for defining TensorFlow graph pieces exactly once per model object.
    """
    cache_name = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return wrapper
def variable_initializer(name, shape, initializer, trainable=True):
    """Create (or fetch, under variable reuse) a float32 TF variable.

    Thin wrapper over ``tf.get_variable`` so every layer builder creates
    variables the same way, enabling sharing across GPU training runs.
    """
    return tf.get_variable(
        name,
        shape,
        initializer=initializer,
        dtype=tf.float32,
        trainable=trainable,
    )
def convolutional_net(images, scopes_list = ['conv1','conv2','local3','local4','softmax_linear']):
    """Build the CIFAR-10 model.

    Args:
        images: Images placeholder.
        scopes_list: Variable-scope names for the five layers.  The mutable
            list default is only read, never mutated, so sharing it across
            calls is harmless.

    Returns:
        Logits tensor (``softmax_linear``).
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU training runs.
    # If we only ran this model on a single GPU, we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().
    #
    # conv1
    with tf.variable_scope(scopes_list[0]) as scope:
        kernel = variable_initializer('weights',
                                      [5, 5, 3, 64],
                                      tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32))
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_initializer('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1 (local response normalization, as in the original CIFAR-10 tutorial)
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')
    # conv2
    with tf.variable_scope(scopes_list[1]) as scope:
        kernel = variable_initializer('weights',
                                      [5, 5, 64, 64],
                                      tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32))
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_initializer('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
    # norm2
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # local3
    with tf.variable_scope(scopes_list[2]) as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        reshape = tf.reshape(pool2, [-1, 4096])
        dim = reshape.get_shape()[1].value
        weights = variable_initializer('weights', [dim, 384],
                                       tf.truncated_normal_initializer(stddev=0.04, dtype=tf.float32))
        biases = variable_initializer('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    # local4
    with tf.variable_scope(scopes_list[3]) as scope:
        weights = variable_initializer('weights', [384, 192],
                                       tf.truncated_normal_initializer(stddev=0.04, dtype=tf.float32))
        biases = variable_initializer('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    # softmax_linear: final logits (no activation applied here)
    with tf.variable_scope(scopes_list[4]) as scope:
        weights = variable_initializer('weights', [192, NUM_CLASSES],
                                       tf.truncated_normal_initializer(stddev=1/192.0, dtype=tf.float32) )
        biases = variable_initializer('biases', [NUM_CLASSES],
                                      tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    return softmax_linear
def convolutional_net_init(images, params_mean_values=None,
                           scopes_list=['conv1', 'conv2', 'local3', 'local4', 'softmax_linear']):
    """
    Build CIFAR10 model with provided initial values.

    Same architecture as ``convolutional_net`` but every variable is
    initialized from the matching entry of ``params_mean_values`` (10 arrays,
    kernel/bias pairs in creation order).  NOTE(review): the ``None`` default
    for ``params_mean_values`` would fail on first indexing — callers are
    expected to always pass it.

    Returns:
        (softmax_linear, param_tensor_list): logits tensor and the created
        variables in creation order.
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU training runs.
    # If we only ran this model on a single GPU, we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().
    #
    # conv1
    param_tensor_list = []
    with tf.variable_scope(scopes_list[0]) as scope:
        kernel = variable_initializer('weights',
                                      [5, 5, 3, 64],
                                      tf.constant_initializer(params_mean_values[0]))
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_initializer('biases', [64], tf.constant_initializer(params_mean_values[1]))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        # _activation_summary(conv1)
        param_tensor_list.append(kernel)
        param_tensor_list.append(biases)
    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')
    # conv2
    with tf.variable_scope(scopes_list[1]) as scope:
        kernel = variable_initializer('weights',
                                      [5, 5, 64, 64],
                                      tf.constant_initializer(params_mean_values[2]))
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_initializer('biases', [64], tf.constant_initializer(params_mean_values[3]))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        param_tensor_list.append(kernel)
        param_tensor_list.append(biases)
    # norm2
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # local3
    with tf.variable_scope(scopes_list[2]) as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        reshape = tf.reshape(pool2, [-1, 4096])
        dim = reshape.get_shape()[1].value
        weights = variable_initializer('weights', [dim, 384],
                                       tf.constant_initializer(params_mean_values[4]))
        biases = variable_initializer('biases', [384], tf.constant_initializer(params_mean_values[5]))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        param_tensor_list.append(weights)
        param_tensor_list.append(biases)
    # local4
    with tf.variable_scope(scopes_list[3]) as scope:
        weights = variable_initializer('weights', [384, 192],
                                       tf.constant_initializer(params_mean_values[6]))
        biases = variable_initializer('biases', [192], tf.constant_initializer(params_mean_values[7]))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        param_tensor_list.append(weights)
        param_tensor_list.append(biases)
    # softmax_linear: final logits (no activation applied here)
    with tf.variable_scope(scopes_list[4]) as scope:
        weights = variable_initializer('weights', [192, NUM_CLASSES],
                                       tf.constant_initializer(params_mean_values[8]))
        biases = variable_initializer('biases', [NUM_CLASSES],
                                      tf.constant_initializer(params_mean_values[9]))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        # _activation_summary(softmax_linear)
        param_tensor_list.append(weights)
        param_tensor_list.append(biases)
    return softmax_linear, param_tensor_list
def weight_diff(w1, w2):
    """Return the element-wise difference between two weight collections.

    Each argument is a sequence of arrays; every array is flattened and the
    pieces are concatenated into a single 1-D vector before subtracting.
    """
    flat_first = np.concatenate([np.ravel(arr) for arr in w1])
    flat_second = np.concatenate([np.ravel(arr) for arr in w2])
    return flat_first - flat_second
def l2_norm(w1, w2):
    """Return the L2 (Euclidean) norm of the difference between weight lists."""
    difference = weight_diff(w1, w2)
    return np.linalg.norm(difference)
| 48.771536
| 132
| 0.62141
| 1,646
| 13,022
| 4.746051
| 0.120899
| 0.018433
| 0.067204
| 0.069124
| 0.836662
| 0.825653
| 0.807604
| 0.800179
| 0.779954
| 0.758449
| 0
| 0.047654
| 0.258716
| 13,022
| 266
| 133
| 48.954887
| 0.761629
| 0.099677
| 0
| 0.646409
| 0
| 0
| 0.032815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044199
| false
| 0
| 0.033149
| 0.01105
| 0.121547
| 0.005525
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a39e865e36d5db053da4c847bb23be8798c0eec9
| 228
|
py
|
Python
|
python/tests/test_import_protobuf.py
|
foxglove/ws-server-example-python
|
9ca831a96206ff38e0f46d0510527019a1358993
|
[
"MIT"
] | null | null | null |
python/tests/test_import_protobuf.py
|
foxglove/ws-server-example-python
|
9ca831a96206ff38e0f46d0510527019a1358993
|
[
"MIT"
] | null | null | null |
python/tests/test_import_protobuf.py
|
foxglove/ws-server-example-python
|
9ca831a96206ff38e0f46d0510527019a1358993
|
[
"MIT"
] | null | null | null |
def test_import_protobuf():
    """
    Ensure the generated protobuf file is successfully importable in a dev environment.
    """
    from foxglove_websocket.examples.proto.ExampleMsg_pb2 import ExampleMsg
    # Touch the imported symbol so linters do not flag the import as unused.
    _ = ExampleMsg
| 28.5
| 87
| 0.745614
| 26
| 228
| 6.346154
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005435
| 0.192982
| 228
| 7
| 88
| 32.571429
| 0.891304
| 0.364035
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6e7f17689b905d9eb78446804ddbcd616ba96ec0
| 34
|
py
|
Python
|
navec/__init__.py
|
FreedomSlow/navec
|
b9add7f6661d5da44a2e1ed42364e0c3bc4b00f1
|
[
"MIT"
] | 115
|
2019-06-13T09:06:41.000Z
|
2022-03-22T12:15:11.000Z
|
navec/__init__.py
|
FreedomSlow/navec
|
b9add7f6661d5da44a2e1ed42364e0c3bc4b00f1
|
[
"MIT"
] | 4
|
2020-02-13T06:40:00.000Z
|
2021-11-24T13:58:11.000Z
|
navec/__init__.py
|
FreedomSlow/navec
|
b9add7f6661d5da44a2e1ed42364e0c3bc4b00f1
|
[
"MIT"
] | 13
|
2019-06-13T06:31:25.000Z
|
2022-03-20T19:20:58.000Z
|
from .navec import Navec # noqa
| 11.333333
| 32
| 0.705882
| 5
| 34
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 34
| 2
| 33
| 17
| 0.923077
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ebd69e37b6a26e451555096c95c124ba4ed98bc
| 150
|
py
|
Python
|
Python Games/Breakout/States/Baseclass.py
|
lazydinoz/HackFest21
|
84bfbfbb2c75a6511226a87d2e947984db878ba1
|
[
"MIT"
] | 1
|
2021-11-12T10:51:19.000Z
|
2021-11-12T10:51:19.000Z
|
Python Games/Breakout/States/Baseclass.py
|
lazydinoz/HackFest21
|
84bfbfbb2c75a6511226a87d2e947984db878ba1
|
[
"MIT"
] | null | null | null |
Python Games/Breakout/States/Baseclass.py
|
lazydinoz/HackFest21
|
84bfbfbb2c75a6511226a87d2e947984db878ba1
|
[
"MIT"
] | null | null | null |
import pygame
class Base:
    """No-op base class for game states (Breakout state machine).

    Subclasses override the hooks they need; every default does nothing.
    """

    def __init__(self):
        pass

    def render(self):
        """Draw the state. Default: no-op."""
        pass

    def update(self, params):
        """Advance the state by one tick. Default: no-op."""
        pass

    def enter(self):
        """Called when the state becomes active. Default: no-op."""
        pass
| 13.636364
| 35
| 0.64
| 21
| 150
| 4.380952
| 0.571429
| 0.26087
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 150
| 11
| 36
| 13.636364
| 0.836364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.666667
| false
| 0.666667
| 0.166667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
42bd9bd4494f3544721fc186b5d3de319b3092a0
| 2,084
|
py
|
Python
|
tests/unit/test_global_config.py
|
iwanbolzern/ConfMe
|
2b91dac318e499b7e25a40fb2abaf0f15a604301
|
[
"MIT"
] | 21
|
2020-03-04T07:40:12.000Z
|
2022-03-25T15:35:29.000Z
|
tests/unit/test_global_config.py
|
iwanbolzern/ConfMe
|
2b91dac318e499b7e25a40fb2abaf0f15a604301
|
[
"MIT"
] | 9
|
2020-03-05T12:38:40.000Z
|
2021-12-22T14:37:33.000Z
|
tests/unit/test_global_config.py
|
iwanbolzern/ConfMe
|
2b91dac318e499b7e25a40fb2abaf0f15a604301
|
[
"MIT"
] | null | null | null |
import os
import uuid
from pathlib import Path
import pytest
from tests.unit.config_model import GlobalRootConfig, RootConfig
@pytest.fixture
def test_config_yaml(tmp_path: str):
    """Write the 'test' environment YAML config into *tmp_path*, return its path."""
    config_content = 'rootValue: 1\n' \
                     'rangeValue: 5\n' \
                     'childNode:\n' \
                     ' testStr: "test-env"\n' \
                     ' testInt: 42\n' \
                     ' testFloat: 42.42\n' \
                     ' anyEnum: value2'
    # Unique filename so parallel test runs never collide; the `_test` suffix
    # is what the ENV-based loader matches on.
    config_path = Path(tmp_path) / f'{uuid.uuid4()}_test.yaml'
    with open(config_path, 'w') as config_file:
        config_file.write(config_content)
    return str(config_path)
@pytest.fixture
def prod_config_yaml(tmp_path: str):
    """Write the 'prod' environment YAML config into *tmp_path*, return its path."""
    config_content = 'rootValue: 1\n' \
                     'rangeValue: 5\n' \
                     'childNode:\n' \
                     ' testStr: "prod-env"\n' \
                     ' testInt: 42\n' \
                     ' testFloat: 42.42\n' \
                     ' anyEnum: value2'
    # Unique filename so parallel test runs never collide; the `_prod` suffix
    # is what the ENV-based loader matches on.
    config_path = Path(tmp_path) / f'{uuid.uuid4()}_prod.yaml'
    with open(config_path, 'w') as config_file:
        config_file.write(config_content)
    return str(config_path)
def test_load_global_config(prod_config_yaml: str, test_config_yaml: str):
    """Switching the ENV variable selects the matching YAML for GlobalRootConfig."""
    os.environ['highSecure'] = 'superSecureSecret'
    GlobalRootConfig.register_folder(Path(prod_config_yaml).parent)
    for env_name, expected in (('test', 'test-env'), ('prod', 'prod-env')):
        os.environ['ENV'] = env_name
        loaded = GlobalRootConfig.get()
        assert loaded.childNode.testStr == expected
def test_load_config_by_env(prod_config_yaml: str, test_config_yaml: str):
    """Switching the ENV variable selects the matching YAML for RootConfig."""
    os.environ['highSecure'] = 'superSecureSecret'
    RootConfig.register_folder(Path(prod_config_yaml).parent)
    for env_name, expected in (('test', 'test-env'), ('prod', 'prod-env')):
        os.environ['ENV'] = env_name
        loaded = RootConfig.get()
        assert loaded.childNode.testStr == expected
| 29.771429
| 74
| 0.615163
| 247
| 2,084
| 4.97166
| 0.222672
| 0.065147
| 0.057003
| 0.061889
| 0.809446
| 0.809446
| 0.809446
| 0.809446
| 0.798046
| 0.698697
| 0
| 0.013029
| 0.263436
| 2,084
| 69
| 75
| 30.202899
| 0.786971
| 0
| 0
| 0.693878
| 0
| 0
| 0.190019
| 0.023033
| 0
| 0
| 0
| 0
| 0.081633
| 1
| 0.081633
| false
| 0
| 0.102041
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
42c0f1bf08d85f82660fbe016388bfb70b691d73
| 123
|
py
|
Python
|
mmt/models/clip/__init__.py
|
jianzhnie/MultimodalTransformer
|
6cd4ca8034a53da361149745aecead68fbe304a0
|
[
"Apache-2.0"
] | 1
|
2021-11-08T14:32:24.000Z
|
2021-11-08T14:32:24.000Z
|
mmt/models/clip/__init__.py
|
jianzhnie/MultimodalTransformer
|
6cd4ca8034a53da361149745aecead68fbe304a0
|
[
"Apache-2.0"
] | null | null | null |
mmt/models/clip/__init__.py
|
jianzhnie/MultimodalTransformer
|
6cd4ca8034a53da361149745aecead68fbe304a0
|
[
"Apache-2.0"
] | null | null | null |
'''
Author: jianzhnie
Date: 2021-12-03 12:05:26
LastEditTime: 2021-12-03 12:05:27
LastEditors: jianzhnie
Description:
'''
| 13.666667
| 33
| 0.731707
| 19
| 123
| 4.736842
| 0.631579
| 0.133333
| 0.177778
| 0.222222
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.256881
| 0.113821
| 123
| 8
| 34
| 15.375
| 0.568807
| 0.918699
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6e1a578c79d553a442e86b8c12861bda68d2515e
| 148
|
py
|
Python
|
Desktop10.4.1/python/gdalconst.py
|
Esri/raster2gpkg
|
d10ebb3038786ecddf41072ba5b2c49baad97c5a
|
[
"Apache-2.0"
] | 13
|
2015-11-18T18:26:34.000Z
|
2021-05-09T13:59:46.000Z
|
Desktop10.4.1/python/gdalconst.py
|
Esri/raster2gpkg
|
d10ebb3038786ecddf41072ba5b2c49baad97c5a
|
[
"Apache-2.0"
] | 4
|
2015-12-26T03:16:25.000Z
|
2016-08-23T17:18:11.000Z
|
Desktop10.4.1/python/gdalconst.py
|
Esri/raster2gpkg
|
d10ebb3038786ecddf41072ba5b2c49baad97c5a
|
[
"Apache-2.0"
] | 5
|
2015-10-22T13:28:53.000Z
|
2020-12-12T13:07:52.000Z
|
# import osgeo.gdalconst as a convenience
# Compatibility shim: importing this legacy module emits a deprecation warning
# and re-exports everything from the canonical osgeo.gdalconst location.
from osgeo.gdal import deprecation_warn
deprecation_warn('gdalconst')
from osgeo.gdalconst import *
| 24.666667
| 42
| 0.797297
| 19
| 148
| 6.105263
| 0.526316
| 0.241379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141892
| 148
| 5
| 43
| 29.6
| 0.913386
| 0.263514
| 0
| 0
| 0
| 0
| 0.088235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6e429e3963717941792f36ffbca568cbdbc21f51
| 311
|
py
|
Python
|
plugins/flytekit-dolt/flytekitplugins/dolt/__init__.py
|
bstadlbauer/flytekit
|
12ef34d7b6d777088ab87f9cf0d5c32355895852
|
[
"Apache-2.0"
] | null | null | null |
plugins/flytekit-dolt/flytekitplugins/dolt/__init__.py
|
bstadlbauer/flytekit
|
12ef34d7b6d777088ab87f9cf0d5c32355895852
|
[
"Apache-2.0"
] | null | null | null |
plugins/flytekit-dolt/flytekitplugins/dolt/__init__.py
|
bstadlbauer/flytekit
|
12ef34d7b6d777088ab87f9cf0d5c32355895852
|
[
"Apache-2.0"
] | null | null | null |
"""
.. currentmodule:: flytekitplugins.dolt
This package contains things that are useful when extending Flytekit.
.. autosummary::
:template: custom.rst
:toctree: generated/
DoltConfig
DoltTable
DoltTableNameTransformer
"""
from .schema import DoltConfig, DoltTable, DoltTableNameTransformer
| 19.4375
| 69
| 0.762058
| 28
| 311
| 8.464286
| 0.892857
| 0.160338
| 0.362869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157556
| 311
| 15
| 70
| 20.733333
| 0.90458
| 0.749196
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6e4d963e949a413ae61d0903b72f491070b179e9
| 59
|
py
|
Python
|
service_monitor/__init__.py
|
soltanoff/systemd_watcher
|
11550f0760c2c654c4f57e11295ec03b9e8ee181
|
[
"MIT"
] | 5
|
2018-12-05T09:22:45.000Z
|
2020-03-17T15:36:21.000Z
|
service_monitor/__init__.py
|
soltanoff/systemd_watcher
|
11550f0760c2c654c4f57e11295ec03b9e8ee181
|
[
"MIT"
] | 10
|
2018-10-29T09:45:27.000Z
|
2021-09-22T17:43:45.000Z
|
service_monitor/__init__.py
|
soltanoff/systemd_watcher
|
11550f0760c2c654c4f57e11295ec03b9e8ee181
|
[
"MIT"
] | null | null | null |
from service_monitor.service_monitor import ServiceMonitor
| 29.5
| 58
| 0.915254
| 7
| 59
| 7.428571
| 0.714286
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 59
| 1
| 59
| 59
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2848fad24bb5e5ad398444d03a8c922c2aff1bb8
| 2,751
|
py
|
Python
|
analysis/prep/network/lib/nhd/barriers.py
|
astutespruce/sarp
|
7ce503380440c47b762ed1a8efd1d3e3aab6605e
|
[
"MIT"
] | 5
|
2020-07-10T16:13:26.000Z
|
2022-03-02T05:06:30.000Z
|
analysis/prep/network/lib/nhd/barriers.py
|
astutespruce/sarp
|
7ce503380440c47b762ed1a8efd1d3e3aab6605e
|
[
"MIT"
] | 23
|
2019-06-02T14:37:53.000Z
|
2019-10-23T17:59:40.000Z
|
analysis/prep/network/lib/nhd/barriers.py
|
astutespruce/sarp
|
7ce503380440c47b762ed1a8efd1d3e3aab6605e
|
[
"MIT"
] | 2
|
2020-05-27T23:28:36.000Z
|
2020-12-14T22:10:24.000Z
|
from pyogrio import read_dataframe

from analysis.lib.geometry import make_valid

# Attribute columns retained for every extracted barrier record
BARRIER_COLS = ["NHDPlusID", "FType", "FCode", "GNIS_Name", "geometry"]

# Dam, reservoir, waterfall
POINT_FTYPES = [343, 436, 487]

# Dam, Gate, Lock Chamber, Waterfall
LINE_FTYPES = [343, 369, 398, 487]

# Dam, Lock, Spillway
POLY_FTYPES = [343, 398, 455]


def _extract_barriers(gdb_path, layer, ftypes, target_crs):
    """Read barrier-type records from one NHD layer and normalize them.

    Shared implementation for the three public extract_* functions below,
    which previously carried three copy-pasted identical bodies.

    Parameters
    ----------
    gdb_path : str
        path to the NHD HUC4 Geodatabase
    layer : str
        layer name within the geodatabase (e.g. "NHDPoint")
    ftypes : list of int
        FType codes to select
    target_crs : GeoPandas CRS object
        target CRS to project NHD to for analysis, like length calculations.
        Must be a planar projection.

    Returns
    -------
    GeoDataFrame
        NHDPlusID cast to uint64, a 1-based uint32 "id" column added, and
        (when non-empty) geometries projected to target_crs and made valid.
    """
    df = read_dataframe(
        gdb_path,
        layer=layer,
        columns=BARRIER_COLS,
        force_2d=True,
        # tuple() renders e.g. "(343, 436, 487)" — valid SQL IN syntax.
        # All FType lists above have >= 2 entries, so no trailing-comma form.
        where=f"FType in {tuple(ftypes)}",
    )
    df.NHDPlusID = df.NHDPlusID.astype("uint64")
    df["id"] = df.index.values.astype("uint32") + 1
    # Reprojection and validity fixing only apply to non-empty frames
    if len(df):
        df = df.to_crs(target_crs)
        df.geometry = make_valid(df.geometry.values.data)
    return df


def extract_barrier_points(gdb_path, target_crs):
    """Extract NHDPoint records that are barrier types.

    Parameters
    ----------
    gdb_path : str
        path to the NHD HUC4 Geodatabase
    target_crs: GeoPandas CRS object
        target CRS to project NHD to for analysis, like length calculations.
        Must be a planar projection.

    Returns
    -------
    GeoDataFrame
    """
    return _extract_barriers(gdb_path, "NHDPoint", POINT_FTYPES, target_crs)


def extract_barrier_lines(gdb_path, target_crs):
    """Extract NHDLine records that are barrier types.

    Parameters
    ----------
    gdb_path : str
        path to the NHD HUC4 Geodatabase
    target_crs: GeoPandas CRS object
        target CRS to project NHD to for analysis, like length calculations.
        Must be a planar projection.

    Returns
    -------
    GeoDataFrame
    """
    return _extract_barriers(gdb_path, "NHDLine", LINE_FTYPES, target_crs)


def extract_barrier_polygons(gdb_path, target_crs):
    """Extract NHDArea records that are barrier types.

    Parameters
    ----------
    gdb_path : str
        path to the NHD HUC4 Geodatabase
    target_crs: GeoPandas CRS object
        target CRS to project NHD to for analysis, like length calculations.
        Must be a planar projection.

    Returns
    -------
    GeoDataFrame
    """
    return _extract_barriers(gdb_path, "NHDArea", POLY_FTYPES, target_crs)
| 23.313559
| 76
| 0.627045
| 350
| 2,751
| 4.788571
| 0.262857
| 0.064439
| 0.03043
| 0.02864
| 0.802506
| 0.761337
| 0.761337
| 0.761337
| 0.761337
| 0.761337
| 0
| 0.025074
| 0.260633
| 2,751
| 117
| 77
| 23.512821
| 0.798918
| 0.348237
| 0
| 0.625
| 0
| 0
| 0.114634
| 0.012805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.041667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9534fbf95b610a117f262020ffa544ae78a87543
| 121
|
py
|
Python
|
enthought/envisage/ui/single_project/action/new_project_action.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/envisage/ui/single_project/action/new_project_action.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/envisage/ui/single_project/action/new_project_action.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
# Backward-compatibility shim for the old `enthought.*` namespace: re-exports
# the real implementation from `envisage.ui.single_project.action`.
from __future__ import absolute_import
from envisage.ui.single_project.action.new_project_action import *
| 30.25
| 66
| 0.859504
| 17
| 121
| 5.647059
| 0.705882
| 0.270833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 121
| 3
| 67
| 40.333333
| 0.872727
| 0.099174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9579a8b128b6c295843eb776ef772560204798a1
| 206
|
py
|
Python
|
test_data/scraper_follower3.py
|
digawp/MyScraper
|
1f0bcb47a1b81002bf70f0869949e16ab10c90e6
|
[
"MIT"
] | null | null | null |
test_data/scraper_follower3.py
|
digawp/MyScraper
|
1f0bcb47a1b81002bf70f0869949e16ab10c90e6
|
[
"MIT"
] | null | null | null |
test_data/scraper_follower3.py
|
digawp/MyScraper
|
1f0bcb47a1b81002bf70f0869949e16ab10c90e6
|
[
"MIT"
] | null | null | null |
import scrapy
def generate_next_urls(response):
    """Sample crawler stub: yields no follow-up URLs.

    Replace with your own implementation.
    https://doc.scrapy.org/en/1.3/intro/tutorial.html#following-links
    """
    next_urls = []
    return next_urls
| 25.75
| 69
| 0.699029
| 27
| 206
| 5.259259
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.174757
| 206
| 8
| 70
| 25.75
| 0.823529
| 0.57767
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2500a8001e30053ec2e117af0ad5f6533705f9d5
| 33
|
py
|
Python
|
tweerator/__init__.py
|
Parassharmaa/tweerator
|
9ed281e05734ef3cb3532f56d18ff9450f5dde46
|
[
"MIT"
] | null | null | null |
tweerator/__init__.py
|
Parassharmaa/tweerator
|
9ed281e05734ef3cb3532f56d18ff9450f5dde46
|
[
"MIT"
] | null | null | null |
tweerator/__init__.py
|
Parassharmaa/tweerator
|
9ed281e05734ef3cb3532f56d18ff9450f5dde46
|
[
"MIT"
] | null | null | null |
from .tweerator import Tweerator
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
252aaf2ab821edfdc8d04f4d44b1948a21ae2822
| 10,048
|
py
|
Python
|
tests/test_recipient_algs_ecdh_aes_key_wrap.py
|
dajiaji/python-cwt
|
61723510663dc4cd5a5171ff3a78994cac5f5213
|
[
"MIT"
] | 11
|
2021-04-29T13:48:15.000Z
|
2022-01-31T22:27:14.000Z
|
tests/test_recipient_algs_ecdh_aes_key_wrap.py
|
dajiaji/python-cwt
|
61723510663dc4cd5a5171ff3a78994cac5f5213
|
[
"MIT"
] | 185
|
2021-04-23T22:14:50.000Z
|
2022-03-28T06:27:35.000Z
|
tests/test_recipient_algs_ecdh_aes_key_wrap.py
|
dajiaji/python-cwt
|
61723510663dc4cd5a5171ff3a78994cac5f5213
|
[
"MIT"
] | 5
|
2021-08-09T02:21:18.000Z
|
2022-01-05T11:39:08.000Z
|
"""
Tests for Direct.
"""
import pytest
from cwt.cose import COSE
from cwt.cose_key import COSEKey
from cwt.exceptions import DecodeError, EncodeError
from cwt.recipient import Recipient
from cwt.recipient_algs.ecdh_aes_key_wrap import ECDH_AESKeyWrap
@pytest.fixture(scope="session", autouse=True)
def sender_key_es():
    # Sender-side key for ECDH-ES key wrap tests. Only kty/alg/crv are
    # supplied — presumably COSEKey.from_jwk generates the ephemeral key
    # material itself; confirm against the cwt library docs.
    return COSEKey.from_jwk(
        {
            "kty": "EC",
            "alg": "ECDH-ES+A128KW",
            "crv": "P-256",
        }
    )
@pytest.fixture(scope="session", autouse=True)
def recipient_public_key():
    # Recipient P-256 public key (x/y coordinates only, no private scalar).
    return COSEKey.from_jwk(
        {
            "kty": "EC",
            "kid": "01",
            "crv": "P-256",
            "x": "Ze2loSV3wrroKUN_4zhwGhCqo3Xhu1td4QjeQ5wIVR0",
            "y": "HlLtdXARY_f55A3fnzQbPcm6hgr34Mp8p-nuzQCE0Zw",
        }
    )
@pytest.fixture(scope="session", autouse=True)
def recipient_private_key():
    # Same key pair as recipient_public_key (identical x/y) plus the private
    # scalar "d" and an explicit alg, for the decode/extract side of tests.
    return COSEKey.from_jwk(
        {
            "kty": "EC",
            "alg": "ECDH-ES+A128KW",
            "kid": "01",
            "crv": "P-256",
            "x": "Ze2loSV3wrroKUN_4zhwGhCqo3Xhu1td4QjeQ5wIVR0",
            "y": "HlLtdXARY_f55A3fnzQbPcm6hgr34Mp8p-nuzQCE0Zw",
            "d": "r_kHyZ-a06rmxM3yESK84r1otSg-aQcVStkRhA-iCM8",
        }
    )
class TestECDH_AESKeyWrap:
    """
    Tests for ECDH_AESKeyWrap.

    Covers constructor alg validation ({1: <alg>} is the COSE protected
    header, {4: b"01"} the kid), the apply()/extract() key-wrap round trip,
    the high-level COSE encode/decode path, and the error branches when a
    key, sender_key, recipient_key, or context is missing or invalid.
    """
    def test_ecdh_aes_key_wrap_constructor_with_ecdh_es_a128kw(self):
        # alg -29 corresponds to ECDH-ES+A128KW (per the method name).
        ctx = ECDH_AESKeyWrap({1: -29}, {4: b"01"})
        assert isinstance(ctx, ECDH_AESKeyWrap)
        assert ctx.alg == -29
        assert ctx.kid == b"01"
    def test_ecdh_aes_key_wrap_constructor_with_ecdh_es_a192kw(self):
        ctx = ECDH_AESKeyWrap({1: -30}, {4: b"01"})
        assert ctx.alg == -30
        assert ctx.kid == b"01"
    def test_ecdh_aes_key_wrap_constructor_with_ecdh_es_a256kw(self):
        ctx = ECDH_AESKeyWrap({1: -31}, {4: b"01"})
        assert ctx.alg == -31
        assert ctx.kid == b"01"
    def test_ecdh_aes_key_wrap_constructor_with_ecdh_ss_a128kw(self):
        ctx = ECDH_AESKeyWrap({1: -32}, {4: b"01"})
        assert ctx.alg == -32
        assert ctx.kid == b"01"
    def test_ecdh_aes_key_wrap_constructor_with_ecdh_ss_a192kw(self):
        ctx = ECDH_AESKeyWrap({1: -33}, {4: b"01"})
        assert ctx.alg == -33
        assert ctx.kid == b"01"
    def test_ecdh_aes_key_wrap_constructor_with_ecdh_ss_a256kw(self):
        ctx = ECDH_AESKeyWrap({1: -34}, {4: b"01"})
        assert ctx.alg == -34
        assert ctx.kid == b"01"
    def test_ecdh_aes_key_wrap_constructor_with_invalid_alg(self):
        # alg -1 is not an ECDH-with-key-wrap algorithm.
        with pytest.raises(ValueError) as err:
            ECDH_AESKeyWrap({1: -1}, {4: b"01"})
            pytest.fail("ECDH_AESKeyWrap() should fail.")
        assert "Unknown alg(1) for ECDH with key wrap: -1." in str(err.value)
    def test_ecdh_aes_key_wrap_encode_and_extract_with_ecdh_es(
        self, sender_key_es, recipient_public_key, recipient_private_key
    ):
        # Full round trip: wrap enc_key for the recipient, serialize the
        # recipient structure, then unwrap with the private key.
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        sender = ECDH_AESKeyWrap({1: -29}, {4: b"01"}, sender_key=sender_key_es)
        sender.apply(enc_key, recipient_key=recipient_public_key, context={"alg": "A128GCM"})
        assert sender.ciphertext is not None
        encoded = sender.to_list()
        recipient = Recipient.from_list(encoded)
        decoded_key = recipient.extract(recipient_private_key, alg="ChaCha20/Poly1305", context={"alg": "A128GCM"})
        assert enc_key.key == decoded_key.key
    def test_ecdh_aes_key_wrap_through_cose_api(self, recipient_public_key, recipient_private_key):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        rec = Recipient.from_jwk({"kty": "EC", "crv": "P-256", "alg": "ECDH-ES+A128KW"})
        rec.apply(enc_key, recipient_key=recipient_public_key, context={"alg": "A128GCM"})
        ctx = COSE.new(alg_auto_inclusion=True)
        encoded = ctx.encode_and_encrypt(b"Hello world!", enc_key, recipients=[rec])
        assert b"Hello world!" == ctx.decode(encoded, recipient_private_key, context={"alg": "A128GCM"})
    def test_ecdh_aes_key_wrap_through_cose_api_without_kid(self):
        # Same as the previous test but both keys omit "kid" (commented out
        # below), exercising kid-less recipient matching.
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        rec = Recipient.from_jwk({"kty": "EC", "crv": "P-256", "alg": "ECDH-ES+A128KW"})
        pub_key = COSEKey.from_jwk(
            {
                "kty": "EC",
                # "kid": "01",
                "crv": "P-256",
                "x": "Ze2loSV3wrroKUN_4zhwGhCqo3Xhu1td4QjeQ5wIVR0",
                "y": "HlLtdXARY_f55A3fnzQbPcm6hgr34Mp8p-nuzQCE0Zw",
            }
        )
        rec.apply(enc_key, recipient_key=pub_key, context={"alg": "A128GCM"})
        ctx = COSE.new(alg_auto_inclusion=True)
        priv_key = COSEKey.from_jwk(
            {
                "kty": "EC",
                # "kid": "01",
                "alg": "ECDH-ES+A128KW",
                "crv": "P-256",
                "x": "Ze2loSV3wrroKUN_4zhwGhCqo3Xhu1td4QjeQ5wIVR0",
                "y": "HlLtdXARY_f55A3fnzQbPcm6hgr34Mp8p-nuzQCE0Zw",
                "d": "r_kHyZ-a06rmxM3yESK84r1otSg-aQcVStkRhA-iCM8",
            }
        )
        encoded = ctx.encode_and_encrypt(b"Hello world!", enc_key, recipients=[rec])
        assert b"Hello world!" == ctx.decode(encoded, priv_key, context={"alg": "A128GCM"})
    def test_ecdh_aes_key_wrap_apply_without_key(self, sender_key_es):
        sender = ECDH_AESKeyWrap({1: -29}, {4: b"01"}, sender_key=sender_key_es)
        with pytest.raises(ValueError) as err:
            # NOTE(review): `recipient_public_key` here is the module-level
            # fixture *function* (not an injected fixture parameter), so a
            # function object is passed as recipient_key. apply() appears to
            # fail on the missing key before using it — confirm intent.
            sender.apply(recipient_key=recipient_public_key, context={"alg": "A128GCM"})
            pytest.fail("apply() should fail.")
        assert "key should be set." in str(err.value)
    def test_ecdh_aes_key_wrap_apply_without_sender_key(self, recipient_public_key):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        sender = ECDH_AESKeyWrap({1: -29}, {4: b"01"})
        with pytest.raises(ValueError) as err:
            sender.apply(enc_key, recipient_key=recipient_public_key, context={"alg": "A128GCM"})
            pytest.fail("apply() should fail.")
        assert "sender_key should be set in advance." in str(err.value)
    def test_ecdh_aes_key_wrap_apply_without_recipient_key(self, sender_key_es):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        sender = ECDH_AESKeyWrap({1: -29}, {4: b"01"}, sender_key=sender_key_es)
        with pytest.raises(ValueError) as err:
            sender.apply(enc_key, context={"alg": "A128GCM"})
            pytest.fail("apply() should fail.")
        assert "recipient_key should be set in advance." in str(err.value)
    def test_ecdh_aes_key_wrap_apply_without_context(self, sender_key_es):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        sender = ECDH_AESKeyWrap({1: -29}, {4: b"01"}, sender_key=sender_key_es)
        with pytest.raises(ValueError) as err:
            # NOTE(review): as above, `recipient_public_key` is the fixture
            # function, not an injected key — apply() raises on the missing
            # context first, so this is never exercised; confirm intent.
            sender.apply(enc_key, recipient_key=recipient_public_key)
            pytest.fail("apply() should fail.")
        assert "context should be set." in str(err.value)
    def test_ecdh_aes_key_wrap_apply_with_invalid_recipient_key(self, sender_key_es, recipient_private_key):
        # A *private* key passed where a public key is required.
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        rec = Recipient.new(protected={"alg": "ECDH-ES+A128KW"}, sender_key=sender_key_es)
        with pytest.raises(ValueError) as err:
            rec.apply(enc_key, recipient_key=recipient_private_key, context={"alg": "A128GCM"})
            pytest.fail("apply() should fail.")
        assert "public_key should be elliptic curve public key." in str(err.value)
    def test_ecdh_aes_key_wrap_apply_with_invalid_key_to_wrap(self, sender_key_es, recipient_public_key):
        # An HMAC key is not wrappable by this algorithm.
        mac_key = COSEKey.from_symmetric_key(key="xxx", alg="HS256")
        rec = Recipient.new(protected={"alg": "ECDH-ES+A128KW"}, sender_key=sender_key_es)
        with pytest.raises(EncodeError) as err:
            rec.apply(mac_key, recipient_key=recipient_public_key, context={"alg": "A128GCM"})
            pytest.fail("apply() should fail.")
        assert "Failed to wrap key." in str(err.value)
    def test_ecdh_aes_key_wrap_extract_without_alg(self):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        ctx = ECDH_AESKeyWrap({1: -29}, {4: b"01"})
        with pytest.raises(ValueError) as err:
            ctx.extract(enc_key)
            pytest.fail("extract() should fail.")
        assert "alg should be set." in str(err.value)
    def test_ecdh_aes_key_wrap_extract_without_context(self):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        ctx = ECDH_AESKeyWrap({1: -29}, {4: b"01"})
        with pytest.raises(ValueError) as err:
            ctx.extract(enc_key, alg="ChaCha20/Poly1305")
            pytest.fail("extract() should fail.")
        assert "context should be set." in str(err.value)
    def test_ecdh_aes_key_wrap_extract_with_invalid_recipient_private_key(self, recipient_public_key):
        enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305")
        rec = Recipient.from_jwk({"kty": "EC", "crv": "P-256", "alg": "ECDH-ES+A128KW"})
        rec.apply(enc_key, recipient_key=recipient_public_key, context={"alg": "A128GCM"})
        ctx = COSE.new(alg_auto_inclusion=True)
        # Private key deliberately missing "alg" (commented out) so decoding
        # fails at key-unwrap time.
        recipient_private_key = COSEKey.from_jwk(
            {
                "kty": "EC",
                "kid": "01",
                # "alg": "ECDH-ES+A128KW",
                "crv": "P-256",
                "x": "Ze2loSV3wrroKUN_4zhwGhCqo3Xhu1td4QjeQ5wIVR0",
                "y": "HlLtdXARY_f55A3fnzQbPcm6hgr34Mp8p-nuzQCE0Zw",
                "d": "r_kHyZ-a06rmxM3yESK84r1otSg-aQcVStkRhA-iCM8",
            }
        )
        encoded = ctx.encode_and_encrypt(b"Hello world!", enc_key, recipients=[rec])
        with pytest.raises(DecodeError) as err:
            ctx.decode(encoded, recipient_private_key, context={"alg": "A128GCM"})
            pytest.fail("extract() should fail.")
        assert "Failed to decode key." in str(err.value)
| 44.264317
| 115
| 0.63744
| 1,289
| 10,048
| 4.687355
| 0.098526
| 0.023833
| 0.033102
| 0.046342
| 0.847071
| 0.833995
| 0.765475
| 0.743628
| 0.716981
| 0.697617
| 0
| 0.052707
| 0.23338
| 10,048
| 226
| 116
| 44.460177
| 0.731663
| 0.009554
| 0
| 0.5
| 0
| 0
| 0.181626
| 0.056311
| 0
| 0
| 0
| 0
| 0.143617
| 1
| 0.117021
| false
| 0
| 0.031915
| 0.015957
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
254ea764a61f90a0a6be45290318eae97e984971
| 152
|
py
|
Python
|
tests/__init__.py
|
dgpv/bip32_template_python_implementation
|
299e87d7827a6e7b0b650fb394f269aaa3e061f7
|
[
"MIT"
] | 5
|
2020-10-26T16:49:54.000Z
|
2021-11-06T10:46:06.000Z
|
tests/__init__.py
|
dgpv/bip32_template_python_implementation
|
299e87d7827a6e7b0b650fb394f269aaa3e061f7
|
[
"MIT"
] | 1
|
2020-10-25T09:40:46.000Z
|
2020-10-25T10:00:28.000Z
|
tests/__init__.py
|
dgpv/bip32_template_python_implementation
|
299e87d7827a6e7b0b650fb394f269aaa3e061f7
|
[
"MIT"
] | 1
|
2022-01-06T07:30:18.000Z
|
2022-01-06T07:30:18.000Z
|
# On MicroPython, re-export the template tests at package level; on CPython
# the `micropython` import raises ImportError and this is skipped silently.
try:
    import micropython  # type: ignore
    # only needed for micropython's unittest
    from .test_templates import *
except ImportError:
    pass
| 21.714286
| 44
| 0.710526
| 18
| 152
| 5.944444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 152
| 6
| 45
| 25.333333
| 0.922414
| 0.335526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
256bdd15f9465a4084f6529ce7ef2bc8b3dff1fb
| 22
|
py
|
Python
|
networks/__init__.py
|
naivete5656/Mitosis_Detection_MLM
|
dfdadd7dfafab7e931f13a84c27e221498c9f959
|
[
"MIT"
] | 2
|
2020-07-14T02:47:32.000Z
|
2020-07-15T09:38:01.000Z
|
networks/__init__.py
|
naivete5656/Mitosis_Detection_MLM
|
dfdadd7dfafab7e931f13a84c27e221498c9f959
|
[
"MIT"
] | 2
|
2021-12-17T13:04:09.000Z
|
2022-01-03T01:20:25.000Z
|
networks/__init__.py
|
naivete5656/Mitosis_Detection_MLM
|
dfdadd7dfafab7e931f13a84c27e221498c9f959
|
[
"MIT"
] | 2
|
2021-05-16T03:47:08.000Z
|
2021-12-28T16:56:23.000Z
|
from .vnet import VNet
| 22
| 22
| 0.818182
| 4
| 22
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c27830aae37019bfa65e406eaf4e44663387f8e0
| 25
|
py
|
Python
|
tabledict/core/exceptions/__init__.py
|
DolphDev/LDictionary
|
a8c44d40f70c7d7243ea3440743dfb9c68d319b5
|
[
"MIT"
] | null | null | null |
tabledict/core/exceptions/__init__.py
|
DolphDev/LDictionary
|
a8c44d40f70c7d7243ea3440743dfb9c68d319b5
|
[
"MIT"
] | 5
|
2018-01-08T14:32:02.000Z
|
2020-08-11T13:12:20.000Z
|
psv/core/exceptions/__init__.py
|
DolphDev/PSV
|
1cb22e20b15e10b01f104b879debb4864b93bfe9
|
[
"MIT"
] | null | null | null |
from . import messages
| 6.25
| 22
| 0.72
| 3
| 25
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24
| 25
| 3
| 23
| 8.333333
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6c5636a859756dc88d73234155f3e5cb260e0c8c
| 47
|
py
|
Python
|
Applications/price_GEF_14/top_level.py
|
nagadakos/online-learning
|
3be9a59b56d4b7147b7efa4175448e74731cd005
|
[
"Apache-2.0"
] | null | null | null |
Applications/price_GEF_14/top_level.py
|
nagadakos/online-learning
|
3be9a59b56d4b7147b7efa4175448e74731cd005
|
[
"Apache-2.0"
] | 4
|
2018-10-25T20:53:07.000Z
|
2018-10-30T16:20:50.000Z
|
Applications/price_GEF_14/top_level.py
|
nagadakos/online-learning
|
3be9a59b56d4b7147b7efa4175448e74731cd005
|
[
"Apache-2.0"
] | 1
|
2018-10-26T13:48:31.000Z
|
2018-10-26T13:48:31.000Z
|
# Placeholder entry point for the price_GEF_14 application.
# NOTE(review): `sys` is imported but unused in the visible code —
# presumably reserved for later use; confirm before removing.
import sys
print("Hello from price_GEF_14!")
| 9.4
| 33
| 0.744681
| 8
| 47
| 4.125
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.148936
| 47
| 4
| 34
| 11.75
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
6c8ac8654891cf438d4cf9012cf1a39879ff0346
| 19,320
|
py
|
Python
|
test/test_slimta_smtp_server.py
|
nanojob/python-slimta
|
70b9c633756a56afaf1fdd53c5ead6d0001036e7
|
[
"MIT"
] | 141
|
2015-01-24T23:59:18.000Z
|
2022-01-30T16:36:37.000Z
|
test/test_slimta_smtp_server.py
|
nanojob/python-slimta
|
70b9c633756a56afaf1fdd53c5ead6d0001036e7
|
[
"MIT"
] | 106
|
2015-01-13T22:49:07.000Z
|
2021-02-17T15:14:11.000Z
|
test/test_slimta_smtp_server.py
|
nanojob/python-slimta
|
70b9c633756a56afaf1fdd53c5ead6d0001036e7
|
[
"MIT"
] | 43
|
2015-07-29T14:55:09.000Z
|
2021-09-24T22:30:38.000Z
|
import unittest
from mox3.mox import MoxTestBase, IsA
from gevent.ssl import SSLSocket, SSLContext, SSLError
from pysasl import SASLAuth
from slimta.smtp.server import Server
from slimta.smtp.auth import AuthSession
from slimta.smtp import ConnectionLost
class TestSmtpServer(MoxTestBase, unittest.TestCase):
def setUp(self):
super(TestSmtpServer, self).setUp()
self.sock = self.mox.CreateMock(SSLSocket)
self.sock.fileno = lambda: -1
self.sock.getpeername = lambda: ('test', 0)
self.context = self.mox.CreateMock(SSLContext)
self.context.session_stats = lambda: {}
def test_starttls_extension(self):
s = Server(None, None)
self.assertFalse('STARTTLS' in s.extensions)
s = Server(None, None, context=self.context)
self.assertTrue('STARTTLS' in s.extensions)
s = Server(None, None, context=self.context, tls_immediately=True)
self.assertFalse('STARTTLS' in s.extensions)
def test_recv_command(self):
self.sock.recv(IsA(int)).AndReturn(b'cmd ARG\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
cmd, arg = s._recv_command()
self.assertEqual(b'CMD', cmd)
self.assertEqual(b'ARG', arg)
def test_get_message_data(self):
expected_reply = b'250 2.6.0 Message accepted for delivery\r\n'
self.sock.recv(IsA(int)).AndReturn(b'one\r\n')
self.sock.recv(IsA(int)).AndReturn(b'.\r\n')
self.sock.sendall(expected_reply)
self.mox.ReplayAll()
s = Server(self.sock, None)
s._get_message_data()
self.assertFalse(s.have_mailfrom)
self.assertFalse(s.have_rcptto)
def test_call_custom_handler(self):
class TestHandler(object):
def TEST(self, arg):
return arg.lower()
s = Server(None, TestHandler())
self.assertEqual(b'stuff', s._call_custom_handler('TEST', b'STUFF'))
def test_banner_quit(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
def test_unhandled_error(self):
class TestHandler(object):
def BANNER_(self, reply):
raise Exception('test')
self.sock.sendall(b'421 4.3.0 Unhandled system error\r\n')
self.mox.ReplayAll()
s = Server(self.sock, TestHandler())
with self.assertRaises(Exception) as cm:
s.handle()
self.assertEqual(('test', ), cm.exception.args)
def test_banner_command(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'BANNER\r\n')
self.sock.sendall(b'500 5.5.2 Syntax error, command unrecognized\r\n')
self.sock.recv(IsA(int)).AndReturn(b'BANNER_\r\n')
self.sock.sendall(b'500 5.5.2 Syntax error, command unrecognized\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
def test_tls_immediately(self):
self.context.wrap_socket(self.sock, server_side=True).AndReturn(self.sock)
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None, context=self.context, tls_immediately=True)
s.handle()
def test_tls_immediately_sslerror(self):
self.context.wrap_socket(self.sock, server_side=True).AndRaise(SSLError())
self.sock.sendall(b'421 4.7.0 TLS negotiation failed\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None, context=self.context, tls_immediately=True)
s.handle()
def test_ehlo(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO there\r\n')
self.sock.sendall(b'250-Hello there\r\n250 TEST\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.extensions.reset()
s.extensions.add('TEST')
s.handle()
self.assertEqual('there', s.ehlo_as)
def test_ehlo_empty(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
self.assertEqual(None, s.ehlo_as)
def test_ehlo_empty_with_helo(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'HELO there\r\n')
self.sock.sendall(b'250 Hello there\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
self.assertEqual('there', s.ehlo_as)
def test_helo(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'HELO there\r\n')
self.sock.sendall(b'250 Hello there\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
self.assertEqual('there', s.ehlo_as)
def test_helo_empty(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'HELO\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
self.assertEqual(None, s.ehlo_as)
def test_helo_empty_with_ehlo(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'HELO\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO there\r\n')
self.sock.sendall(b'250-Hello there\r\n250 TEST\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.extensions.reset()
s.extensions.add('TEST')
s.handle()
self.assertEqual('there', s.ehlo_as)
def test_starttls(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO there\r\n')
self.sock.sendall(b'250-Hello there\r\n250 STARTTLS\r\n')
self.sock.recv(IsA(int)).AndReturn(b'STARTTLS\r\n')
self.sock.sendall(b'220 2.7.0 Go ahead\r\n')
self.context.wrap_socket(self.sock, server_side=True).AndReturn(self.sock)
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None, context=self.context)
s.extensions.reset()
s.extensions.add('STARTTLS')
s.handle()
self.assertEqual(None, s.ehlo_as)
def test_starttls_bad(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'STARTTLS\r\n')
self.sock.sendall(b'503 5.5.1 Bad sequence of commands\r\n')
self.sock.recv(IsA(int)).AndReturn(b'STARTTLS badarg\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO there\r\n')
self.sock.sendall(b'250-Hello there\r\n250 STARTTLS\r\n')
self.sock.recv(IsA(int)).AndReturn(b'STARTTLS\r\n')
self.sock.sendall(b'220 2.7.0 Go ahead\r\n')
self.context.wrap_socket(self.sock, server_side=True).AndRaise(SSLError())
self.sock.sendall(b'421 4.7.0 TLS negotiation failed\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None, context=self.context)
s.extensions.reset()
s.extensions.add('STARTTLS')
s.handle()
self.assertEqual('there', s.ehlo_as)
def test_auth(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'EHLO there\r\n')
self.sock.sendall(b'250-Hello there\r\n250 AUTH PLAIN\r\n')
self.sock.recv(IsA(int)).AndReturn(b'AUTH PLAIN dGVzdHppZAB0ZXN0dXNlcgB0ZXN0cGFzc3dvcmQ=\r\n')
self.sock.sendall(b'235 2.7.0 Authentication successful\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.extensions.reset()
s.extensions.add('AUTH', AuthSession(SASLAuth([b'PLAIN']), s.io))
s.handle()
self.assertTrue(s.authed)
def test_mailfrom(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'HELO there\r\n')
self.sock.sendall(b'250 Hello there\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test">"addr>\r\n')
self.sock.sendall(b'250 2.1.0 Sender <test">"addr> Ok\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
self.assertTrue(s.have_mailfrom)
def test_mailfrom_bad(self):
self.sock.sendall(b'220 ESMTP server\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test>\r\n')
self.sock.sendall(b'503 5.5.1 Bad sequence of commands\r\n')
self.sock.recv(IsA(int)).AndReturn(b'HELO there\r\n')
self.sock.sendall(b'250 Hello there\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test1> SIZE=5\r\n')
self.sock.sendall(b'504 5.5.4 Command parameter not implemented\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FRM:<addr>\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<addr\r\n')
self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test1>\r\n')
self.sock.sendall(b'250 2.1.0 Sender <test1> Ok\r\n')
self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test2>\r\n')
self.sock.sendall(b'503 5.5.1 Bad sequence of commands\r\n')
self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
self.sock.sendall(b'221 2.0.0 Bye\r\n')
self.mox.ReplayAll()
s = Server(self.sock, None)
s.handle()
self.assertTrue(s.have_mailfrom)
def test_mailfrom_send_extension(self):
    """SIZE extension: non-numeric and over-limit SIZE values are rejected."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'EHLO there\r\n')
    # EHLO reply advertises SIZE 10 (set below via extensions.add).
    self.sock.sendall(b'250-Hello there\r\n250 SIZE 10\r\n')
    # Non-numeric SIZE value: syntax error.
    self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test1> SIZE=ASDF\r\n')
    self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
    # SIZE above the advertised limit of 10: message too big.
    self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test1> SIZE=20\r\n')
    self.sock.sendall(b'552 5.3.4 Message size exceeds 10 limit\r\n')
    # SIZE within the limit: accepted.
    self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test1> SIZE=5\r\n')
    self.sock.sendall(b'250 2.1.0 Sender <test1> Ok\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.extensions.reset()
    s.extensions.add('SIZE', 10)
    s.handle()
    self.assertTrue(s.have_mailfrom)
def test_rcptto(self):
    """Multiple RCPT TO commands are accepted once MAIL FROM is done."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    # Quoted '>' in the address must be handled, as in test_mailfrom.
    self.sock.recv(IsA(int)).AndReturn(b'RCPT TO:<test">"addr>\r\n')
    self.sock.sendall(b'250 2.1.5 Recipient <test">"addr> Ok\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'RCPT TO:<test2>\r\n')
    self.sock.sendall(b'250 2.1.5 Recipient <test2> Ok\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    # Pre-seed the session past HELO and MAIL FROM.
    s.ehlo_as = b'test'
    s.have_mailfrom = True
    s.handle()
    self.assertTrue(s.have_rcptto)
def test_rcptto_bad(self):
    """Invalid RCPT TO variants each get the proper error reply."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    # RCPT before HELO: bad sequence.
    self.sock.recv(IsA(int)).AndReturn(b'RCPT TO:<test>\r\n')
    self.sock.sendall(b'503 5.5.1 Bad sequence of commands\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'HELO there\r\n')
    self.sock.sendall(b'250 Hello there\r\n')
    # RCPT before MAIL FROM: still a bad sequence.
    self.sock.recv(IsA(int)).AndReturn(b'RCPT TO:<test>\r\n')
    self.sock.sendall(b'503 5.5.1 Bad sequence of commands\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'MAIL FROM:<test1>\r\n')
    self.sock.sendall(b'250 2.1.0 Sender <test1> Ok\r\n')
    # Misspelled keyword ("T"): syntax error.
    self.sock.recv(IsA(int)).AndReturn(b'RCPT T:<test1>\r\n')
    self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
    # Unterminated address bracket: syntax error.
    self.sock.recv(IsA(int)).AndReturn(b'RCPT TO:<test1\r\n')
    self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.handle()
    # No valid RCPT TO was ever issued.
    self.assertFalse(s.have_rcptto)
def test_data(self):
    """DATA is accepted and terminated by the lone-dot line."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'DATA\r\n')
    self.sock.sendall(b'354 Start mail input; end with <CRLF>.<CRLF>\r\n')
    # End-of-data marker and QUIT arrive pipelined in one recv.
    self.sock.recv(IsA(int)).AndReturn(b'.\r\nQUIT\r\n')
    self.sock.sendall(b'250 2.6.0 Message accepted for delivery\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    # Pre-seed the session so DATA is legal immediately.
    s.ehlo_as = b'test'
    s.have_mailfrom = True
    s.have_rcptto = True
    s.handle()
def test_data_bad(self):
    """DATA with an argument or without RCPT TO is rejected."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    # DATA takes no argument.
    self.sock.recv(IsA(int)).AndReturn(b'DATA arg\r\n')
    self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
    # No RCPT TO yet (have_rcptto is not pre-set below): bad sequence.
    self.sock.recv(IsA(int)).AndReturn(b'DATA\r\n')
    self.sock.sendall(b'503 5.5.1 Bad sequence of commands\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.ehlo_as = b'test'
    s.have_mailfrom = True
    s.handle()
def test_data_connectionlost(self):
    """A dropped connection during DATA raises ConnectionLost."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'DATA\r\n')
    self.sock.sendall(b'354 Start mail input; end with <CRLF>.<CRLF>\r\n')
    # Empty recv result means the peer closed the socket mid-DATA.
    self.sock.recv(IsA(int)).AndReturn(b'')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.ehlo_as = b'test'
    s.have_mailfrom = True
    s.have_rcptto = True
    self.assertRaises(ConnectionLost, s.handle)
def test_noop(self):
    """NOOP always succeeds with 250."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'NOOP\r\n')
    self.sock.sendall(b'250 2.0.0 Ok\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.handle()
def test_rset(self):
    """RSET clears the mail transaction but keeps the EHLO identity."""
    class TestHandlers(object):
        server = None
        def NOOP(self2, reply):
            # Invoked after RSET: ehlo_as must survive while the
            # mailfrom/rcptto transaction flags must be cleared.
            self.assertEqual(b'test', self2.server.ehlo_as)
            self.assertFalse(self2.server.have_mailfrom)
            self.assertFalse(self2.server.have_rcptto)
    self.sock.sendall(b'220 ESMTP server\r\n')
    # RSET takes no argument.
    self.sock.recv(IsA(int)).AndReturn(b'RSET arg\r\n')
    self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'RSET\r\n')
    self.sock.sendall(b'250 2.0.0 Ok\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'NOOP\r\n')
    self.sock.sendall(b'250 2.0.0 Ok\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    h = TestHandlers()
    s = h.server = Server(self.sock, h)
    # Pre-seed a full transaction so RSET has something to clear.
    s.ehlo_as = b'test'
    s.have_mailfrom = True
    s.have_rcptto = True
    s.handle()
def test_quit_bad(self):
    """QUIT with an argument is a syntax error; bare QUIT ends the session."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT arg\r\n')
    self.sock.sendall(b'501 5.5.4 Syntax error in parameters or arguments\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.handle()
def test_custom_command(self):
    """A handler method named after a custom verb is dispatched to."""
    class TestHandlers(object):
        def TEST(self2, reply, arg, server):
            # Handler sees the live server object and the raw argument.
            self.assertTrue(server.have_mailfrom)
            reply.code = '250'
            reply.message = 'Doing '+arg.decode()
    self.sock.sendall(b'220 ESMTP server\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'TEST stuff\r\n')
    # Reply built by the handler above.
    self.sock.sendall(b'250 2.0.0 Doing stuff\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, TestHandlers())
    s.have_mailfrom = True
    s.handle()
def test_bad_commands(self):
    """Empty, unknown, and non-enabled extension commands all get 500."""
    self.sock.sendall(b'220 ESMTP server\r\n')
    # Empty command line.
    self.sock.recv(IsA(int)).AndReturn(b'\r\n')
    self.sock.sendall(b'500 5.5.2 Syntax error, command unrecognized\r\n')
    # Unknown verb.
    self.sock.recv(IsA(int)).AndReturn(b'BADCMD\r\n')
    self.sock.sendall(b'500 5.5.2 Syntax error, command unrecognized\r\n')
    # STARTTLS/AUTH are unrecognized when their extensions are absent.
    self.sock.recv(IsA(int)).AndReturn(b'STARTTLS\r\n')
    self.sock.sendall(b'500 5.5.2 Syntax error, command unrecognized\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'AUTH\r\n')
    self.sock.sendall(b'500 5.5.2 Syntax error, command unrecognized\r\n')
    self.sock.recv(IsA(int)).AndReturn(b'QUIT\r\n')
    self.sock.sendall(b'221 2.0.0 Bye\r\n')
    self.mox.ReplayAll()
    s = Server(self.sock, None)
    s.handle()
def test_gather_params(self):
    """_gather_params parses an ESMTP parameter string into a dict."""
    s = Server(None, None)
    # KEY=value pairs map to bytes values.
    self.assertEqual({b'ONE': b'1'}, s._gather_params(b' ONE=1'))
    # A bare keyword (no '=') maps to True.
    self.assertEqual({b'TWO': True}, s._gather_params(b'TWO'))
    self.assertEqual({b'THREE': b'foo', b'FOUR': b'bar'},
                     s._gather_params(b' THREE=foo FOUR=bar'))
    # Parameter names are upper-cased.
    self.assertEqual({b'FIVE': True}, s._gather_params(b'five'))
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| 43.611738
| 102
| 0.616511
| 3,130
| 19,320
| 3.764537
| 0.0623
| 0.152084
| 0.094204
| 0.132394
| 0.844097
| 0.827209
| 0.808538
| 0.796656
| 0.791988
| 0.780956
| 0
| 0.038847
| 0.220549
| 19,320
| 442
| 103
| 43.710407
| 0.743608
| 0.001708
| 0
| 0.684864
| 0
| 0
| 0.227327
| 0.003422
| 0
| 0
| 0
| 0
| 0.081886
| 1
| 0.091811
| false
| 0
| 0.01737
| 0.002481
| 0.124069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
66685c1b4c5f59fd325683a2eb2a24c69a61e101
| 29
|
py
|
Python
|
thealot/__init__.py
|
nCrazed/TheAlot
|
dc1bf88019c4f0ef08924d6f7bb39ff1e799940e
|
[
"MIT"
] | null | null | null |
thealot/__init__.py
|
nCrazed/TheAlot
|
dc1bf88019c4f0ef08924d6f7bb39ff1e799940e
|
[
"MIT"
] | 1
|
2016-02-25T11:22:46.000Z
|
2016-02-25T11:22:46.000Z
|
thealot/__init__.py
|
nCrazed/TheAlot
|
dc1bf88019c4f0ef08924d6f7bb39ff1e799940e
|
[
"MIT"
] | null | null | null |
from .thealot import TheAlot
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
667e7c4daf1ab809f643a72558b8514957d82aff
| 11,856
|
py
|
Python
|
tests/classes/wrappers/test_map_result.py
|
vahndi/ux
|
8acb3c07327e547ee948788536b6d6d1d7815bb2
|
[
"MIT"
] | null | null | null |
tests/classes/wrappers/test_map_result.py
|
vahndi/ux
|
8acb3c07327e547ee948788536b6d6d1d7815bb2
|
[
"MIT"
] | 43
|
2019-05-30T12:26:52.000Z
|
2020-08-02T21:57:24.000Z
|
tests/classes/wrappers/test_map_result.py
|
vahndi/ux
|
8acb3c07327e547ee948788536b6d6d1d7815bb2
|
[
"MIT"
] | null | null | null |
from itertools import product
from typing import List
from unittest import TestCase
from pandas import Series, Index, DataFrame
from ux.wrappers.map_result import MapResult
class TestMapResult(TestCase):
    """Tests for MapResult conversions (to_series/to_frame) and arithmetic.

    Fixture naming: ``mr_<key>_<value>`` is the MapResult under test and
    ``s_<key>_<value>`` its expected pandas Series equivalent, where
    ``<key>`` is 'single' (scalar keys) or 'tuple' (2-part keys) and
    ``<value>`` is 'single' (scalar values), 'fixed' (equal-length list
    values) or 'variable' (different-length list values).
    """
    def setUp(self) -> None:
        """Build one MapResult/Series pair per key/value combination."""
        # scalar keys -> scalar values
        self.mr_single_single: MapResult = MapResult(
            data={'a': 1, 'b': 2, 'c': 3},
            key_names='letters',
            value_names='numbers'
        )
        self.s_single_single: Series = Series(
            index=Index(data=['a', 'b', 'c'], name='letters'),
            data=[1, 2, 3], name='numbers'
        )
        # scalar keys -> equal-length lists (key repeated per element
        # in the flattened Series)
        self.mr_single_fixed: MapResult = MapResult(
            data={'a': [1, 2, 3], 'b': [4, 5, 6]},
            key_names='letters',
            value_names='numbers'
        )
        self.s_single_fixed: Series = Series(
            index=Index(data=['a', 'a', 'a', 'b', 'b', 'b'], name='letters'),
            data=[1, 2, 3, 4, 5, 6], name='numbers'
        )
        # expected wide-format frame for the fixed-length fixture
        self.d_data_fixed_wide: DataFrame = DataFrame(
            data={'a': [1, 2, 3], 'b': [4, 5, 6]}
        )
        # scalar keys -> lists of differing lengths
        self.mr_single_variable: MapResult = MapResult(
            data={'a': [1, 2], 'b': [3, 4, 5]},
            key_names='letters',
            value_names='numbers'
        )
        self.s_single_variable: Series = Series(
            index=Index(data=['a', 'a', 'b', 'b', 'b'], name='letters'),
            data=[1, 2, 3, 4, 5], name='numbers'
        )
        # tuple keys -> scalar values
        self.mr_tuple_single: MapResult = MapResult(
            data={('a', 'b'): 1, ('c', 'd'): 2, ('e', 'f'): 3},
            key_names=['letter_1', 'letter_2'],
            value_names='numbers'
        )
        self.s_tuple_single: Series = Series(
            index=Index(data=[('a', 'b'), ('c', 'd'), ('e', 'f')], names=['letter_1', 'letter_2']),
            data=[1, 2, 3], name='numbers'
        )
        # tuple keys -> equal-length lists
        self.mr_tuple_fixed: MapResult = MapResult(
            data={('a', 'b'): [1, 2, 3], ('c', 'd'): [4, 5, 6]},
            key_names=['letter_1', 'letter_2'],
            value_names='numbers'
        )
        self.s_tuple_fixed: Series = Series(
            index=Index(data=[('a', 'b'), ('a', 'b'), ('a', 'b'), ('c', 'd'), ('c', 'd'), ('c', 'd')],
                        names=['letter_1', 'letter_2']),
            data=[1, 2, 3, 4, 5, 6], name='numbers'
        )
        # tuple keys -> lists of differing lengths
        self.mr_tuple_variable: MapResult = MapResult(
            data={('a', 'b'): [1, 2], ('c', 'd'): [3, 4, 5]},
            key_names=['letter_1', 'letter_2'],
            value_names='numbers'
        )
        self.s_tuple_variable: Series = Series(
            index=Index(data=[('a', 'b'), ('a', 'b'), ('c', 'd'), ('c', 'd'), ('c', 'd')],
                        names=['letter_1', 'letter_2']),
            data=[1, 2, 3, 4, 5], name='numbers'
        )
        # groupings used by the *_fails tests to cross mismatched key types
        self.mr_single_key: List[MapResult] = [
            self.mr_single_single, self.mr_single_fixed, self.mr_single_variable
        ]
        self.mr_tuple_key: List[MapResult] = [
            self.mr_tuple_single, self.mr_tuple_fixed, self.mr_tuple_variable
        ]
    @staticmethod
    def series_equivalent(data_1: Series, data_2: Series) -> bool:
        """True if the two Series have identical index and value lists."""
        return (
            data_1.index.tolist() == data_2.index.tolist() and
            data_1.to_list() == data_2.to_list()
        )
    @staticmethod
    def frames_equivalent(data_1: DataFrame, data_2: DataFrame) -> bool:
        """True if the frames match on columns (any order), index and values."""
        return (
            sorted(data_1.columns) == sorted(data_2.columns) and
            data_1.index.to_list() == data_2.index.to_list()
            and all(data_1[column].to_list() == data_2[column].to_list() for column in data_1.columns)
        )
    def test_to_series(self):
        """to_series matches the hand-built Series for every fixture."""
        self.assertTrue(self.series_equivalent(self.s_single_single, self.mr_single_single.to_series()))
        self.assertTrue(self.series_equivalent(self.s_single_fixed, self.mr_single_fixed.to_series()))
        self.assertTrue(self.series_equivalent(self.s_single_variable, self.mr_single_variable.to_series()))
        self.assertTrue(self.series_equivalent(self.s_tuple_single, self.mr_tuple_single.to_series()))
        self.assertTrue(self.series_equivalent(self.s_tuple_fixed, self.mr_tuple_fixed.to_series()))
        self.assertTrue(self.series_equivalent(self.s_tuple_variable, self.mr_tuple_variable.to_series()))
    def test_to_frame(self):
        """to_frame matches reset_index() of the expected Series."""
        self.assertTrue(
            self.frames_equivalent(self.s_single_single.reset_index(), self.mr_single_single.to_frame())
        )
        self.assertTrue(
            self.frames_equivalent(self.s_single_fixed.reset_index(), self.mr_single_fixed.to_frame())
        )
        self.assertTrue(
            self.frames_equivalent(self.s_single_variable.reset_index(), self.mr_single_variable.to_frame())
        )
        self.assertTrue(
            self.frames_equivalent(self.s_tuple_single.reset_index(), self.mr_tuple_single.to_frame())
        )
        self.assertTrue(
            self.frames_equivalent(self.s_tuple_fixed.reset_index(), self.mr_tuple_fixed.to_frame())
        )
        self.assertTrue(
            self.frames_equivalent(self.s_tuple_variable.reset_index(), self.mr_tuple_variable.to_frame())
        )
    def test_to_frame_wide(self):
        """to_frame(wide=True) yields one column per key."""
        self.assertTrue(self.frames_equivalent(self.d_data_fixed_wide, self.mr_single_fixed.to_frame(wide=True)))
    def test_add_works(self):
        """'+' adds scalar values element-wise and concatenates list values."""
        self.assertEqual(
            self.mr_single_single + self.mr_single_single,
            MapResult(
                data={'a': 2, 'b': 4, 'c': 6},
                key_names='letters', value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_single_fixed + self.mr_single_fixed,
            MapResult(
                data={'a': [1, 2, 3, 1, 2, 3], 'b': [4, 5, 6, 4, 5, 6]},
                key_names='letters', value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_single_variable + self.mr_single_variable,
            MapResult(
                data={'a': [1, 2, 1, 2], 'b': [3, 4, 5, 3, 4, 5]},
                key_names='letters', value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_single + self.mr_tuple_single,
            MapResult(
                data={('a', 'b'): 2, ('c', 'd'): 4, ('e', 'f'): 6},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_fixed + self.mr_tuple_fixed,
            MapResult(
                data={('a', 'b'): [1, 2, 3, 1, 2, 3], ('c', 'd'): [4, 5, 6, 4, 5, 6]},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_variable + self.mr_tuple_variable,
            MapResult(
                data={('a', 'b'): [1, 2, 1, 2], ('c', 'd'): [3, 4, 5, 3, 4, 5]},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
        # mixed fixed + variable list values are also concatenable
        self.assertEqual(
            self.mr_single_fixed + self.mr_single_variable,
            MapResult(
                data={'a': [1, 2, 3, 1, 2], 'b': [4, 5, 6, 3, 4, 5]},
                key_names=['letters'], value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_fixed + self.mr_tuple_variable,
            MapResult(
                data={('a', 'b'): [1, 2, 3, 1, 2], ('c', 'd'): [4, 5, 6, 3, 4, 5]},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
    def test_add_fails(self):
        """'+' raises for mismatched key types or unaddable value types."""
        # mismatching key types
        for mr_1, mr_2 in product(
                self.mr_single_key, self.mr_tuple_key
        ):
            self.assertRaises(KeyError, lambda: mr_1 + mr_2)
        # unaddable value types
        for mr_1, mr_2 in [
            (self.mr_single_single, self.mr_single_fixed),
            (self.mr_single_single, self.mr_single_variable),
            (self.mr_tuple_single, self.mr_tuple_fixed),
            (self.mr_tuple_single, self.mr_tuple_variable),
        ]:
            self.assertRaises(TypeError, lambda: mr_1 + mr_2)
    def test_sub_works(self):
        """'-' subtracts scalar values element-wise."""
        self.assertEqual(
            self.mr_single_single - self.mr_single_single,
            MapResult(
                data={'a': 0, 'b': 0, 'c': 0},
                key_names='letters', value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_single - self.mr_tuple_single,
            MapResult(
                data={('a', 'b'): 0, ('c', 'd'): 0, ('e', 'f'): 0},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
    def test_sub_fails(self):
        """'-' raises for mismatched key types or unsubtractable values."""
        # mismatching key types
        for mr_1, mr_2 in product(
                self.mr_single_key, self.mr_tuple_key
        ):
            self.assertRaises(KeyError, lambda: mr_1 - mr_2)
        # unsubtractable value types
        for mr_1, mr_2 in [
            (self.mr_single_single, self.mr_single_fixed),
            (self.mr_single_single, self.mr_single_variable),
            (self.mr_single_fixed, self.mr_single_variable),
            (self.mr_tuple_single, self.mr_tuple_fixed),
            (self.mr_tuple_single, self.mr_tuple_variable),
            (self.mr_tuple_fixed, self.mr_tuple_variable)
        ]:
            self.assertRaises(TypeError, lambda: mr_1 - mr_2)
    def test_mul_works(self):
        """'*' multiplies scalar values element-wise."""
        self.assertEqual(
            self.mr_single_single * self.mr_single_single,
            MapResult(
                data={'a': 1, 'b': 4, 'c': 9},
                key_names='letters', value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_single * self.mr_tuple_single,
            MapResult(
                data={('a', 'b'): 1, ('c', 'd'): 4, ('e', 'f'): 9},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
    def test_mul_fails(self):
        """'*' raises for mismatched key types or unmultipliable values."""
        # mismatching key types
        for mr_1, mr_2 in product(
                self.mr_single_key, self.mr_tuple_key
        ):
            self.assertRaises(KeyError, lambda: mr_1 * mr_2)
        # unmultipliable value types
        for mr_1, mr_2 in [
            (self.mr_single_single, self.mr_single_fixed),
            (self.mr_single_single, self.mr_single_variable),
            (self.mr_single_fixed, self.mr_single_variable),
            (self.mr_tuple_single, self.mr_tuple_fixed),
            (self.mr_tuple_single, self.mr_tuple_variable),
            (self.mr_tuple_fixed, self.mr_tuple_variable)
        ]:
            self.assertRaises(TypeError, lambda: mr_1 * mr_2)
    def test_div_works(self):
        """'/' divides scalar values element-wise."""
        self.assertEqual(
            self.mr_single_single / self.mr_single_single,
            MapResult(
                data={'a': 1, 'b': 1, 'c': 1},
                key_names='letters', value_names='numbers'
            )
        )
        self.assertEqual(
            self.mr_tuple_single / self.mr_tuple_single,
            MapResult(
                data={('a', 'b'): 1, ('c', 'd'): 1, ('e', 'f'): 1},
                key_names=['letter_1', 'letter_2'], value_names='numbers'
            )
        )
    def test_div_fails(self):
        """'/' raises for mismatched key types or indivisible values."""
        # mismatching key types
        for mr_1, mr_2 in product(
                self.mr_single_key, self.mr_tuple_key
        ):
            self.assertRaises(KeyError, lambda: mr_1 / mr_2)
        # indivisible value types
        for mr_1, mr_2 in [
            (self.mr_single_single, self.mr_single_fixed),
            (self.mr_single_single, self.mr_single_variable),
            (self.mr_single_fixed, self.mr_single_variable),
            (self.mr_tuple_single, self.mr_tuple_fixed),
            (self.mr_tuple_single, self.mr_tuple_variable),
            (self.mr_tuple_fixed, self.mr_tuple_variable)
        ]:
            self.assertRaises(TypeError, lambda: mr_1 / mr_2)
| 38.245161
| 113
| 0.54251
| 1,492
| 11,856
| 4.020777
| 0.058981
| 0.107018
| 0.108018
| 0.06001
| 0.88048
| 0.820303
| 0.780963
| 0.744791
| 0.713619
| 0.661777
| 0
| 0.027557
| 0.311319
| 11,856
| 309
| 114
| 38.368932
| 0.707165
| 0.015773
| 0
| 0.420074
| 0
| 0
| 0.051458
| 0
| 0
| 0
| 0
| 0
| 0.130112
| 1
| 0.052045
| false
| 0
| 0.018587
| 0.007435
| 0.081784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
66dd8d3a4d0c6b8b9cd781c4991c94896208c2d5
| 3,668
|
py
|
Python
|
tests/test_compat.py
|
sharov/dd-trace-py
|
d0995b49cf7147ab463d0a67a38779fad3f539b4
|
[
"BSD-3-Clause"
] | 1
|
2019-11-24T23:09:29.000Z
|
2019-11-24T23:09:29.000Z
|
tests/test_compat.py
|
sharov/dd-trace-py
|
d0995b49cf7147ab463d0a67a38779fad3f539b4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_compat.py
|
sharov/dd-trace-py
|
d0995b49cf7147ab463d0a67a38779fad3f539b4
|
[
"BSD-3-Clause"
] | 2
|
2017-05-27T05:58:36.000Z
|
2019-02-07T13:38:53.000Z
|
# -*- coding: utf-8 -*-
# Define source file encoding to support raw unicode characters in Python 2
# Third party
from nose.tools import eq_
# Project
from ddtrace.compat import to_unicode, PY2
# Use different test suites for each Python version, this allows us to test the expected
# results for each Python version rather than writing a generic "works for both" test suite
if PY2:
    class TestCompatPY2(object):
        """to_unicode behaviour under Python 2 (str/unicode split)."""
        def test_to_unicode_string(self):
            # Calling `compat.to_unicode` on a non-unicode string
            res = to_unicode('test')
            eq_(type(res), unicode)
            eq_(res, 'test')
        def test_to_unicode_unicode_encoded(self):
            # Calling `compat.to_unicode` on a UTF-8 encoded byte string
            res = to_unicode('\xc3\xbf')
            eq_(type(res), unicode)
            eq_(res, u'ÿ')
        def test_to_unicode_unicode_double_decode(self):
            # Calling `compat.to_unicode` on an already-decoded string
            # This represents the double-decode issue, which can cause a `UnicodeEncodeError`
            # `'\xc3\xbf'.decode('utf-8').decode('utf-8')`
            res = to_unicode('\xc3\xbf'.decode('utf-8'))
            eq_(type(res), unicode)
            eq_(res, u'ÿ')
        def test_to_unicode_unicode_string(self):
            # Calling `compat.to_unicode` on a unicode string is a no-op
            res = to_unicode(u'ÿ')
            eq_(type(res), unicode)
            eq_(res, u'ÿ')
        def test_to_unicode_bytearray(self):
            # Calling `compat.to_unicode` with a `bytearray` containing unicode
            res = to_unicode(bytearray('\xc3\xbf'))
            eq_(type(res), unicode)
            eq_(res, u'ÿ')
        def test_to_unicode_bytearray_double_decode(self):
            # Calling `compat.to_unicode` with an already decoded `bytearray`
            # This represents the double-decode issue, which can cause a `UnicodeEncodeError`
            # `bytearray('\xc3\xbf').decode('utf-8').decode('utf-8')`
            res = to_unicode(bytearray('\xc3\xbf').decode('utf-8'))
            eq_(type(res), unicode)
            eq_(res, u'ÿ')
        def test_to_unicode_non_string(self):
            # Calling `compat.to_unicode` on non-string types falls back to str()
            eq_(to_unicode(1), u'1')
            eq_(to_unicode(True), u'True')
            eq_(to_unicode(None), u'None')
            eq_(to_unicode(dict(key='value')), u'{\'key\': \'value\'}')
else:
    class TestCompatPY3(object):
        """to_unicode behaviour under Python 3 (str is already unicode)."""
        def test_to_unicode_string(self):
            # Calling `compat.to_unicode` on a non-unicode string
            res = to_unicode('test')
            eq_(type(res), str)
            eq_(res, 'test')
        def test_to_unicode_unicode_encoded(self):
            # Calling `compat.to_unicode` on a unicode escaped string
            res = to_unicode('\xff')
            eq_(type(res), str)
            eq_(res, 'ÿ')
        def test_to_unicode_unicode_string(self):
            # Calling `compat.to_unicode` on a unicode string is a no-op
            res = to_unicode('ÿ')
            eq_(type(res), str)
            eq_(res, 'ÿ')
        def test_to_unicode_bytearray(self):
            # Calling `compat.to_unicode` with a `bytearray` containing unicode
            res = to_unicode(bytearray('\xff', 'utf-8'))
            eq_(type(res), str)
            eq_(res, 'ÿ')
        def test_to_unicode_non_string(self):
            # Calling `compat.to_unicode` on non-string types falls back to str()
            eq_(to_unicode(1), '1')
            eq_(to_unicode(True), 'True')
            eq_(to_unicode(None), 'None')
            eq_(to_unicode(dict(key='value')), '{\'key\': \'value\'}')
| 38.610526
| 93
| 0.585605
| 475
| 3,668
| 4.298947
| 0.187368
| 0.18952
| 0.052889
| 0.094025
| 0.803624
| 0.767385
| 0.751714
| 0.700784
| 0.683154
| 0.683154
| 0
| 0.00887
| 0.293075
| 3,668
| 94
| 94
| 39.021277
| 0.778635
| 0.333969
| 0
| 0.571429
| 0
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.035714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
66eb57287a0dafaa40e5c7029f4f95ff6c3153ba
| 299
|
py
|
Python
|
webapi/youtube/__init__.py
|
RKSCapul/demo-hermitcraft-concept-site-backend-r2
|
ef1f2c7ee16be0301445a2e2902def0df2d08546
|
[
"MIT"
] | null | null | null |
webapi/youtube/__init__.py
|
RKSCapul/demo-hermitcraft-concept-site-backend-r2
|
ef1f2c7ee16be0301445a2e2902def0df2d08546
|
[
"MIT"
] | null | null | null |
webapi/youtube/__init__.py
|
RKSCapul/demo-hermitcraft-concept-site-backend-r2
|
ef1f2c7ee16be0301445a2e2902def0df2d08546
|
[
"MIT"
] | null | null | null |
from .channels import getYouTubeChannelDataAll
from .channels import getYouTubeChannelDataUser
from .channels import getYouTubeAccountPictureAll
from .channels import getYouTubeChannelLivestreamDataAll
from .videos import getYouTubeChannelVideos
from .videos import getAllRecentYouTubeChannelVideos
| 42.714286
| 56
| 0.899666
| 24
| 299
| 11.208333
| 0.416667
| 0.178439
| 0.267658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080268
| 299
| 7
| 57
| 42.714286
| 0.978182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
dd65b870998d80f2ebebb6853507b2956888465a
| 22,226
|
py
|
Python
|
TEMPy/Cluster.py
|
OniDaito/ChimeraXTempy
|
a32ef6c54a403580f3a530ab36d91e475bf4b2dc
|
[
"MIT"
] | 2
|
2020-04-03T03:38:08.000Z
|
2020-06-21T02:31:38.000Z
|
TEMPy/Cluster.py
|
OniDaito/ChimeraXTempy
|
a32ef6c54a403580f3a530ab36d91e475bf4b2dc
|
[
"MIT"
] | 16
|
2017-06-16T20:06:14.000Z
|
2017-07-31T17:32:32.000Z
|
TEMPy/Cluster.py
|
OniDaito/ChimeraXTempy
|
a32ef6c54a403580f3a530ab36d91e475bf4b2dc
|
[
"MIT"
] | 1
|
2020-06-21T02:31:44.000Z
|
2020-06-21T02:31:44.000Z
|
#===============================================================================
# This file is part of TEMPy.
#
# TEMPy is a software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2015 Birkbeck College University of London.
#
# Authors: Maya Topf, Daven Vasishtan, Arun Prasad Pandurangan,
# Irene Farabella, Agnel-Praveen Joseph, Harpal Sahota
#
# This software is made available under GPL V3 license
# http://www.gnu.org/licenses/gpl-3.0.html
#
#
# Please cite your use of TEMPy in published work:
#
# Farabella, I., Vasishtan, D., Joseph, A.P., Pandurangan, A.P., Sahota, H. & Topf, M. (2015). J. Appl. Cryst. 48.
#
#===============================================================================
from TEMPy.StructureBlurrer import StructureBlurrer
from TEMPy.ScoringFunctions import ScoringFunctions
from numpy import zeros
import sys
class Cluster:
"""A class to clustering an ensemble of structure instance"""
def __init__(self):
    # No state to initialise; the clustering methods are self-contained.
    pass
def _print_results_cluster(self,models,class_num,number_top_mod,score,write=False):
"""
private function used in Cluster_Ensemble
"""
out_list=[]
if write==True:
outp = open("top"+str(number_top_mod)+str(score)+"_classes.txt", "w")
outp.write("pdb_name\tscore\tlrms\tclass\n")
for i in range(1,class_num+1):
# print the fits of each class ordered by the highest score
for ipdb in models:
if (ipdb[-1] == i):
out_list.append([ipdb[0],ipdb[2],ipdb[3],ipdb[4]])
outp.write("%s\t%.5f\t%.3f\t%d\n" %(ipdb[0],ipdb[2],ipdb[3],ipdb[4]))
outp.close()
else:
for i in range(1,class_num+1):
for ipdb in models:
if (ipdb[-1] == i):
out_list.append([ipdb[0],ipdb[2],ipdb[3],ipdb[4]])
return out_list
def _print_results_cluster2(self,models,write=True):
"""
private function used in Cluster_Ensemble
"""
out_list=[]
if write==True:
outp = open("top_rank.txt", "w")
outp.write("pdb_name\tscore\tlrms\tclass\n")
for i in models:
#[name_mod,mod,score_mod,int(0),int(0)]
# print the fits of each class ordered by the highest score
outp.write("%s\t%.5f\n" %(i[0],i[2]))
outp.close()
else:
print('this is for print!!!')
def cluster_fit_ensemble_top_fit(self, ensemble_list, score, rms_cutoff, res_target_map, sigma_coeff, number_top_mod=0, write=False, targetMap=False):
    """
    RMSD clustering of the multiple "fits" starting from the best scoring
    model accordingly with a chosen score.
    Cluster the fits based on Calpha RMSD (starting from the best scoring
    model).

    Arguments:
        *ensemble_list*
            Input list of Structure Instances.
        *score*
            Scoring function to use ('CCC', 'LAP', 'MI', 'ENV', 'NV',
            'NV_Sobel', 'NV_Laplace', 'CD'); see the ScoringFunctions
            class for details of each option.
        *rms_cutoff*
            float, the Calpha RMSD cutoff based on which you want to
            cluster the solutions. For example 3.5 (for 3.5 A).
        *res_target_map*
            the resolution, in Angstroms, of the target Map.
        *sigma_coeff*
            the sigma value (multiplied by the resolution) that controls
            the width of the Gaussian. Default value is 0.356; see
            rank_fit_ensemble for the other conventional choices
            (0.187R, 0.225R, 0.425R, 0.5R, 1R).
        *number_top_mod*
            Number of Fits to cluster. Default (0) is all.
        *write*
            True writes out a file listing the structure instances
            representing different fits, scored and clustered. Note the
            lrms column is the Calpha RMSD of each fit from the first
            fit in its class.
        *targetMap*
            Target Map Instance.
    Return:
        list of [name, score, lrms, class] rows grouped by class.
    """
    # Rank all fits by the chosen score first; clustering then starts
    # from the best-scoring model.
    list_ordered = self.rank_fit_ensemble(
        ensemble_list, score, res_target_map, sigma_coeff,
        number_top_mod=0, write=False, targetMap=targetMap.copy())
    # Restrict to the requested number of top fits (0 means "all") --
    # this replaces the two formerly-duplicated branches.
    if number_top_mod != 0:
        list_ordered = list_ordered[:int(number_top_mod)]
    end_num = len(list_ordered)
    fit_class = 0
    # Greedy single-linkage-style pass: each still-unassigned fit seeds a
    # new class and claims every later unassigned fit within rms_cutoff.
    for ini_num1, ipdb in enumerate(list_ordered):
        # BUGFIX: the original passed printf-style arguments to print()
        # instead of %-formatting the message.
        print("model num %d: %s\n" % (ini_num1 + 1, ipdb[0]))
        mod1 = ipdb[1]
        print('next index ' + str(ini_num1))
        if ipdb[-1] == 0:
            fit_class += 1
            for ipdb1 in list_ordered[ini_num1:end_num]:
                if ipdb1[-1] == 0:
                    mod2 = ipdb1[1]
                    rmsd_val = float(mod1.RMSD_from_same_structure(mod2, CA=True))
                    # record the RMSD from the class seed in the lrms slot
                    ipdb1[3] = rmsd_val
                    print("rmsd of %s from best local fit (%s)= %.2f" % (ipdb1[0], ipdb[0], rmsd_val))
                    if rmsd_val < rms_cutoff:
                        ipdb1[-1] = fit_class
                        print('class= ' + str(ipdb1[-1]))
    return self._print_results_cluster(list_ordered, fit_class, number_top_mod, score, write)
def RMSD_ensemble(self, rank_fit_ensemble, ensemble_list, CA=True):
    """
    Calculates the pairwise RMSD matrix for all Structure Instance in
    the ensemble.

    Arguments:
        *rank_fit_ensemble*
            Ensemble of Structure Instance ranked using
            cluster.rank_fit_ensemble
        *ensemble_list*
            Input list of Structure Instances
        *CA*
            set to True if only CA-RMSD is needed
    Return:
        A numpy array
    """
    # Map model name -> structure instance for O(1) lookup.
    list_rotate_models_dict = {}
    for entry in ensemble_list:
        list_rotate_models_dict[entry[0]] = entry[1]
    sorted_rank = rank_fit_ensemble
    mxRMSD = zeros(shape=(len(sorted_rank), len(sorted_rank)))
    # enumerate() gives the matrix indices directly, replacing the
    # original O(n) list.index() lookups inside the O(n^2) loop (which
    # were also wrong on duplicate entries).
    for m1, mod1 in enumerate(sorted_rank):
        for m2, mod2 in enumerate(sorted_rank):
            print(mod1[0], mod2[0])
            rmsd_val = float(
                list_rotate_models_dict[mod1[0]].RMSD_from_same_structure(
                    list_rotate_models_dict[mod2[0]], CA=CA))
            mxRMSD[m1][m2] = rmsd_val
    return mxRMSD
def rank_fit_ensemble(self,ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=0,\
write=False,targetMap=False,cont_targetMap=None):
"""
RMSD clustering of the multiple "fits" accordingly with a chosen score.
Cluster the fits based on Calpha RMSD (starting from the best scoring model)
Arguments:
*ensemble_list*
Input list of Structure Instances.
*targetMap*
Target Map Instance.
*score*
Scoring function to use.
See ScoringFunctions class for a list of the available Scoring Function.
E.g. set score='CCC' to use the Cross-correlation coefficient.
Score option are:
i 'CCC' - Cross-correlation coefficient;
ii 'LAP' - Laplacian-filtered cross-correlation coefficient: useful for maps with resolutions worse than 10-15 A;
iii 'MI' - Mutual information score: a good and robust score but relatively slow to calculate;
iv 'ENV' - Envelope score: the fastest score to calculate due to binarisation of the map.
v-vii 'NV','NV_Sobel','NV_Laplace'- Normal vector score: a vector-based surface superimposition score with or without Sobel/Laplace filter.
viii 'CD' - Chamfer Distance: a score used in computer vision algorithms as a fast similarity metric
*rms_cutoff*
float, the Calpha RMSD cutoff based on which you want to cluster the solutions. For example 3.5 (for 3.5 A).
*res_target_map*
the resolution, in Angstroms, of the target Map.
*sigma_coeff*
the sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default values is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.225R which makes the Fourier transform of the distribution fall to 1/e of its maximum value at wavenumber 1/resolution, the default in Chimera (Petterson et al, 2004)
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, an option in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
1R where the sigma value simply equal to the resolution, as used by NMFF (Tama et al, 2004).
*number_top_mod*
Number of Fits to cluster. Default is all.
*write*
True will write out a file that contains the list of the structure instances representing different fits scored and clustered.
note the lrms column is the Calpha RMSD of each fit from the first fit in its class
"""
blurrer = StructureBlurrer()
scorer = ScoringFunctions()
cluster=Cluster()
count=0
dict_ensembl={}
list_to_order=[]
#print targetMap
if targetMap==False:
#targetMap = self.protMap(prot, min(resolution/4., 3.5), resolution)
print("WARNING:Need target map")
sys.exit()
if score not in ['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']:
print('Incorrect Scoring Function: %s', score)
print('Please select from one of the following scoring functions: %s', ''.join(['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']))
sys.exit()
targetMap=targetMap.copy()
if score=='CCC':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.CCC_map(sim_map,targetMap,0.5*sim_map.fullMap.std(),cont_targetMap,2,True)[0]#CCC(sim_map,targetMap)
else: score_mod=scorer.CCC_map(sim_map,targetMap,0.0,0.0,True)[0]
#else: score_mod=scorer.CCC(sim_map,targetMap)
#'name_file','structure_instance','score','lrmsd','class'
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='LAP':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
score_mod=scorer.laplace_CCC(sim_map,targetMap)
#'name_file','structure_instance','score','lrmsd','class'
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='MI':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.MI(sim_map,targetMap,0.5*sim_map.fullMap.std(),cont_targetMap,1)
else: score_mod=scorer.MI(sim_map,targetMap)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='NV':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
#These two values should be calculated for the experimental map, and only
#need to be calculated once, at the beginning
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.normal_vector_score(targetMap,sim_map, cont_targetMap-(0.1*targetMap.std()), cont_targetMap+(0.1*targetMap.std()),Filter=None)
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.normal_vector_score(targetMap,sim_map, min_thr, max_thr,Filter=None)
score_mod = 1 - (score_mod/3.14)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='NV_Sobel':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.normal_vector_score(targetMap,sim_map, cont_targetMap-(0.1*targetMap.std()), cont_targetMap+(0.1*targetMap.std()),Filter='Sobel')
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.normal_vector_score(targetMap,sim_map, min_thr, max_thr,Filter='Sobel')
score_mod = 1 - (score_mod/3.14)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='NV_Laplace':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None: score_mod=scorer.normal_vector_score(targetMap,sim_map, cont_targetMap-(0.1*targetMap.std()), cont_targetMap+(0.1*targetMap.std()),Filter='Laplace')
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.normal_vector_score(targetMap,sim_map, min_thr, max_thr,Filter='Laplace')
score_mod = 1 - (score_mod/3.14)
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='ENV':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
score_mod=scorer.envelope_score(targetMap,min_thr,mod)
#'name_file','structure_instance','score','lrmsd','class'
list_to_order.append([name_mod,mod,score_mod,0,0])
if score=='CD':
for mod1 in ensemble_list:
count+=1
name_mod=mod1[0]
mod=mod1[1]
sim_map = blurrer.gaussian_blur(mod, res_target_map,densMap=targetMap,sigma_coeff=sigma_coeff)
if not cont_targetMap is None:
score_mod=scorer._surface_distance_score(sim_map,targetMap,0.5*sim_map.fullMap.std(),cont_targetMap,'Minimum')
else:
min_thr=targetMap.get_primary_boundary(mod.get_prot_mass_from_atoms(), targetMap.min(), targetMap.max())
points=targetMap.get_point_map(min_thr,percentage=0.2)
max_thr=targetMap.get_second_boundary(min_thr, points, min_thr, targetMap.max(),err_percent=1)
score_mod=scorer.chamfer_distance(sim_map,targetMap, min_thr, max_thr, kdtree=None)
score_mod = 1/score_mod
list_to_order.append([name_mod,mod,score_mod,0,0])
if score in ['NV','NV_Sobel','NV_Laplace']:
list_ordered=sorted(list_to_order, key=lambda x: x[2],reverse=True)#was false when NV was negative
else:
list_ordered=sorted(list_to_order, key=lambda x: x[2],reverse=True)
if number_top_mod==0:
if write==True:
return cluster._print_results_cluster2(list_ordered,write)
return list_ordered
else:
x=int(number_top_mod)
if write==True:
return cluster._print_results_cluster2(list_ordered[:x],write)
return list_ordered[:x]
| 53.68599
| 196
| 0.548862
| 2,677
| 22,226
| 4.385506
| 0.143071
| 0.021806
| 0.017888
| 0.011499
| 0.808859
| 0.795145
| 0.777939
| 0.766525
| 0.753918
| 0.747956
| 0
| 0.025883
| 0.362053
| 22,226
| 413
| 197
| 53.815981
| 0.802102
| 0.365653
| 0
| 0.648649
| 0
| 0
| 0.044166
| 0.004766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0.004505
| 0.018018
| 0
| 0.085586
| 0.085586
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
06db35de3fccd81c6b0762bb58b7518314063cf5
| 23
|
py
|
Python
|
sdr-py/util/__init__.py
|
jdstmporter/SDRAudio
|
40392ab443de2e565f8e6b448af6cc3012c906db
|
[
"BSD-3-Clause"
] | null | null | null |
sdr-py/util/__init__.py
|
jdstmporter/SDRAudio
|
40392ab443de2e565f8e6b448af6cc3012c906db
|
[
"BSD-3-Clause"
] | null | null | null |
sdr-py/util/__init__.py
|
jdstmporter/SDRAudio
|
40392ab443de2e565f8e6b448af6cc3012c906db
|
[
"BSD-3-Clause"
] | null | null | null |
from .log import SYSLOG
| 23
| 23
| 0.826087
| 4
| 23
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
06fe274d6b0082bef9c2bdffdf6655abc2f70a1c
| 9,917
|
py
|
Python
|
dexp/utils/backends/_cupy/texture/_test/test_texture.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 16
|
2021-04-21T14:09:19.000Z
|
2022-03-22T02:30:59.000Z
|
dexp/utils/backends/_cupy/texture/_test/test_texture.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 28
|
2021-04-15T17:43:08.000Z
|
2022-03-29T16:08:35.000Z
|
dexp/utils/backends/_cupy/texture/_test/test_texture.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T17:41:30.000Z
|
2022-03-18T15:32:27.000Z
|
from arbol import aprint, asection
from dexp.utils.backends import CupyBackend
from dexp.utils.backends._cupy.texture.texture import create_cuda_texture
def test_cupy_texture_4channels():
    """Round-trip a (height, width, 4) float32 array through a 4-channel CUDA
    texture (nearest sampling, unnormalised coordinates) and check the kernel
    reproduces it exactly.

    NOTE(review): when cupy is not installed the test prints a message and
    passes silently instead of being reported as skipped.
    """
    try:
        import cupy
        with CupyBackend():
            # CUDA kernel: each thread reads one float4 texel and writes its
            # four components to consecutive floats of the output buffer.
            source = r"""
            extern "C"{
            __global__ void copyKernel(float* output,
                                       cudaTextureObject_t texObj,
                                       int width, int height)
            {
                unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
                unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
                // Read from texture and write to global memory
                float u = x+0.5f;
                float v = y+0.5f;
                if (x < width && y < height)
                {
                    output[y * 4 *width + 4 *x +0] = tex2D<float4>(texObj, u, v).x;
                    output[y * 4 *width + 4 *x +1] = tex2D<float4>(texObj, u, v).y;
                    output[y * 4 *width + 4 *x +2] = tex2D<float4>(texObj, u, v).z;
                    output[y * 4 *width + 4 *x +3] = tex2D<float4>(texObj, u, v).w;
                }
            }
            }
            """
            width = 3
            height = 5
            # allocate input/output arrays
            tex_data = cupy.arange(width * height * 4, dtype=cupy.float32).reshape(height, width, 4)
            # set up a texture object
            texobj, cuda_array = create_cuda_texture(
                tex_data, num_channels=4, sampling_mode="nearest", dtype=cupy.float32
            )
            real_output = cupy.zeros_like(tex_data)
            expected_output = tex_data.copy()
            # get the kernel, which copies from texture memory
            kernel = cupy.RawKernel(source, "copyKernel")
            # launch it
            block_x = 4
            block_y = 4
            grid_x = (width + block_x - 1) // block_x
            grid_y = (height + block_y - 1) // block_y
            kernel((grid_x, grid_y), (block_x, block_y), (real_output, texobj, width, height))
            # drop our references to the texture object and its backing array
            del texobj, cuda_array
            # test outcome
            assert cupy.allclose(real_output, expected_output)
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
def test_cupy_texture_1channel_normcoord():
    """Round-trip a (height, width) float32 array through a single-channel CUDA
    texture addressed with normalised coordinates and linear sampling.

    Sampling at texel centres ((x+0.5)/width, (y+0.5)/height) should reproduce
    the input exactly even with linear filtering.

    NOTE(review): when cupy is not installed the test prints a message and
    passes silently instead of being reported as skipped.
    """
    try:
        import cupy
        with CupyBackend():
            # CUDA kernel: samples the texture at normalised texel centres and
            # writes (with a debug printf) each value back out.
            source = r"""
            extern "C"{
            __global__ void texture_1channel_normcoord_kernel(float* output,
                                                            cudaTextureObject_t texObj,
                                                            int width,
                                                            int height)
            {
                unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
                unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
                // Read from texture and write to global memory
                float u = (float(x)+0.5f)/width;
                float v = (float(y)+0.5f)/height;
                if (x < width && y < height)
                {
                    float value = tex2D<float>(texObj, u, v);
                    printf("(%f, %f)=%f\n", u, v, value);
                    output[y * width + x] = value;
                }
            }
            }
            """
            width = 3
            height = 5
            # allocate input/output arrays
            tex_data = cupy.arange(width * height, dtype=cupy.float32).reshape(height, width)
            # set up a texture object
            texobj, cuda_array = create_cuda_texture(
                tex_data, num_channels=1, normalised_coords=True, sampling_mode="linear", dtype=cupy.float32
            )
            real_output = cupy.zeros_like(tex_data)
            expected_output = tex_data.copy()
            # get the kernel, which copies from texture memory
            kernel = cupy.RawKernel(source, "texture_1channel_normcoord_kernel")
            # launch it
            block_x = 4
            block_y = 4
            grid_x = (width + block_x - 1) // block_x
            grid_y = (height + block_y - 1) // block_y
            kernel((grid_x, grid_y), (block_x, block_y), (real_output, texobj, width, height))
            # drop our references to the texture object and its backing array
            del texobj, cuda_array
            # test outcome
            assert cupy.allclose(real_output, expected_output)
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
def test_cupy_texture_1channel():
    """Round-trip a (height, width) float32 array through a single-channel CUDA
    texture with linear sampling and unnormalised coordinates.

    Sampling at texel centres (x+0.5, y+0.5) should reproduce the input exactly
    even with linear filtering.

    NOTE(review): when cupy is not installed the test prints a message and
    passes silently instead of being reported as skipped.
    """
    try:
        import cupy
        with CupyBackend():
            # CUDA kernel: samples the texture at texel centres and writes
            # (with a debug printf) each value back out.
            source = r"""
            extern "C"{
            __global__ void copyKernel(float* output,
                                       cudaTextureObject_t texObj,
                                       int width, int height)
            {
                unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
                unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
                // Read from texture and write to global memory
                float u = x+0.5f;
                float v = y+0.5f;
                if (x < width && y < height)
                {
                    float value = tex2D<float>(texObj, u, v);
                    printf("(%f, %f)=%f\n", u, v, value);
                    output[y * width + x] = value;
                }
            }
            }
            """
            width = 3
            height = 5
            # allocate input/output arrays
            tex_data = cupy.arange(width * height, dtype=cupy.float32).reshape(height, width)
            # overwrite one cell so the data is not a pure ramp
            tex_data[1, 2] = 1
            # set up a texture object
            texobj, cuda_array = create_cuda_texture(
                tex_data, num_channels=1, sampling_mode="linear", dtype=cupy.float32
            )
            real_output = cupy.zeros_like(tex_data)
            expected_output = tex_data.copy()
            # get the kernel, which copies from texture memory
            kernel = cupy.RawKernel(source, "copyKernel")
            # launch it
            block_x = 4
            block_y = 4
            grid_x = (width + block_x - 1) // block_x
            grid_y = (height + block_y - 1) // block_y
            kernel((grid_x, grid_y), (block_x, block_y), (real_output, texobj, width, height))
            # drop our references to the texture object and its backing array
            del texobj, cuda_array
            # test outcome
            assert cupy.allclose(real_output, expected_output)
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
def test_basic_cupy_texture():
    """Exercise the raw cupy texture API (no create_cuda_texture helper):
    build channel/resource/texture descriptors by hand, copy data through a
    CUDAarray-backed texture, and check the kernel output matches.

    NOTE(review): when cupy is not installed the test prints a message and
    passes silently instead of being reported as skipped.
    """
    try:
        import cupy
        with CupyBackend():
            # CUDA kernel: point-sampled copy from the texture to global memory.
            source = r"""
            extern "C"{
            __global__ void copyKernel(float* output,
                                       cudaTextureObject_t texObj,
                                       int width, int height)
            {
                unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
                unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
                // Read from texture and write to global memory
                float u = x;
                float v = y;
                if (x < width && y < height)
                    output[y * width + x] = tex2D<float>(texObj, u, v);
            }
            }
            """
            width = 8
            height = 16
            # set up a texture object
            # 32-bit single-channel float texels
            ch = cupy.cuda.texture.ChannelFormatDescriptor(32, 0, 0, 0, cupy.cuda.runtime.cudaChannelFormatKindFloat)
            arr2 = cupy.cuda.texture.CUDAarray(ch, width, height)
            res = cupy.cuda.texture.ResourceDescriptor(cupy.cuda.runtime.cudaResourceTypeArray, cuArr=arr2)
            # clamp addressing, point (nearest) filtering, raw element reads
            tex = cupy.cuda.texture.TextureDescriptor(
                (cupy.cuda.runtime.cudaAddressModeClamp, cupy.cuda.runtime.cudaAddressModeClamp),
                cupy.cuda.runtime.cudaFilterModePoint,
                cupy.cuda.runtime.cudaReadModeElementType,
            )
            texobj = cupy.cuda.texture.TextureObject(res, tex)
            # allocate input/output arrays
            tex_data = cupy.arange(width * height, dtype=cupy.float32).reshape(height, width)
            real_output = cupy.zeros_like(tex_data)
            expected_output = cupy.zeros_like(tex_data)
            # the expected output is the data after a round trip through the CUDA array
            arr2.copy_from(tex_data)
            arr2.copy_to(expected_output)
            # get the kernel, which copies from texture memory
            ker = cupy.RawKernel(source, "copyKernel")
            # launch it
            block_x = 4
            block_y = 4
            grid_x = (width + block_x - 1) // block_x
            grid_y = (height + block_y - 1) // block_y
            ker((grid_x, grid_y), (block_x, block_y), (real_output, texobj, width, height))
            # drop our references to the texture object and the CUDA array
            del texobj, arr2
            # test outcome
            assert cupy.allclose(real_output, expected_output)
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
def test_basic_cupy_texture_leak():
    """Stress create_cuda_texture by building 100 textures over a 512^3 float32
    volume; the texture handles are deliberately rebound each iteration, so a
    reference/GPU-memory leak in the helper would exhaust device memory here.

    NOTE(review): when cupy is not installed the test prints a message and
    passes silently instead of being reported as skipped.
    """
    try:
        import cupy
        with CupyBackend():
            # allocate input/output arrays
            length = 512
            tex_data = cupy.arange(length ** 3, dtype=cupy.float32).reshape(length, length, length)
            with asection("loop"):
                for i in range(100):
                    aprint(f"i={i}")
                    # previous texobj/cuda_array are dropped by rebinding;
                    # cleanup is intentionally left to garbage collection
                    texobj, cuda_array = create_cuda_texture(
                        tex_data, num_channels=1, sampling_mode="linear", dtype=cupy.float32
                    )
    except ModuleNotFoundError:
        print("Cupy module not found! Test passes nevertheless!")
| 37.422642
| 117
| 0.502571
| 1,043
| 9,917
| 4.620326
| 0.137105
| 0.027599
| 0.029882
| 0.017639
| 0.816974
| 0.787715
| 0.765512
| 0.741855
| 0.733555
| 0.724424
| 0
| 0.018617
| 0.4096
| 9,917
| 264
| 118
| 37.564394
| 0.804441
| 0.053242
| 0
| 0.640625
| 0
| 0.020833
| 0.417636
| 0.021565
| 0
| 0
| 0
| 0
| 0.020833
| 1
| 0.026042
| false
| 0.026042
| 0.041667
| 0
| 0.067708
| 0.046875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
663dffa08d698c3482de75263890caeea5f21bf5
| 45
|
py
|
Python
|
adaptive/__init__.py
|
empiricalstateofmind/adaptive
|
86bc2477309fcb18b3bfc4739888bb9c97b992b3
|
[
"Apache-2.0"
] | null | null | null |
adaptive/__init__.py
|
empiricalstateofmind/adaptive
|
86bc2477309fcb18b3bfc4739888bb9c97b992b3
|
[
"Apache-2.0"
] | null | null | null |
adaptive/__init__.py
|
empiricalstateofmind/adaptive
|
86bc2477309fcb18b3bfc4739888bb9c97b992b3
|
[
"Apache-2.0"
] | null | null | null |
from .model import *
from .functions import *
| 22.5
| 24
| 0.755556
| 6
| 45
| 5.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 24
| 22.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b09aefedc004f2f99a01fd9056c0363047ad1f37
| 32
|
py
|
Python
|
syntax.py
|
franbeep/TWDM-PON-Sim
|
c34f626c737f03d280bb96fd1dbd4eaa291383e3
|
[
"MIT"
] | 1
|
2021-11-19T07:20:09.000Z
|
2021-11-19T07:20:09.000Z
|
syntax.py
|
franbeep/TWDM-PON-Sim
|
c34f626c737f03d280bb96fd1dbd4eaa291383e3
|
[
"MIT"
] | null | null | null |
syntax.py
|
franbeep/TWDM-PON-Sim
|
c34f626c737f03d280bb96fd1dbd4eaa291383e3
|
[
"MIT"
] | null | null | null |
# Smoke test: merely importing the simulator module surfaces any syntax errors.
import sim
print("syntax ok!")
| 8
| 19
| 0.6875
| 5
| 32
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 3
| 20
| 10.666667
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
9fe9eb9ec13e00689fd731255c8929f87227e409
| 143
|
py
|
Python
|
examples/pybind11-project/python-src/pybind11_project/sub_package/sub.py
|
tttapa/py-build-cmake
|
29a6970102f567952993ee681cbe0b2d85166adf
|
[
"MIT"
] | 2
|
2022-02-16T22:37:54.000Z
|
2022-03-05T19:27:11.000Z
|
examples/pybind11-project/python-src/pybind11_project/sub_package/sub.py
|
tttapa/py-build-cmake
|
29a6970102f567952993ee681cbe0b2d85166adf
|
[
"MIT"
] | null | null | null |
examples/pybind11-project/python-src/pybind11_project/sub_package/sub.py
|
tttapa/py-build-cmake
|
29a6970102f567952993ee681cbe0b2d85166adf
|
[
"MIT"
] | null | null | null |
"""Example module that subtracts two integers in Python."""
def sub(a: int, b: int) -> int:
    """Return the difference ``a - b`` of two integers.

    >>> sub(5, 3)
    2
    """
    difference = a - b
    return difference
| 23.833333
| 59
| 0.629371
| 21
| 143
| 4.285714
| 0.666667
| 0.266667
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216783
| 143
| 6
| 60
| 23.833333
| 0.803571
| 0.531469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
b01fa5cf41e52dfa1c60492ed529de3bcf1d7268
| 2,795
|
py
|
Python
|
tests/factor_max_min_tests.py
|
petermlm/ProbPy
|
efb55962283e1c6c2422de812ec8689ffb9dbf16
|
[
"MIT"
] | 16
|
2015-01-05T19:14:24.000Z
|
2021-08-19T22:25:04.000Z
|
tests/factor_max_min_tests.py
|
petermlm/ProbPy
|
efb55962283e1c6c2422de812ec8689ffb9dbf16
|
[
"MIT"
] | null | null | null |
tests/factor_max_min_tests.py
|
petermlm/ProbPy
|
efb55962283e1c6c2422de812ec8689ffb9dbf16
|
[
"MIT"
] | 7
|
2015-04-10T18:24:58.000Z
|
2018-01-26T23:54:59.000Z
|
from nose.tools import with_setup, nottest
from tests.test_base import TestBase
from ProbPy import Factor, Event
class TestFactorMaxMin(TestBase):
    """Exercise Factor.max/min and Factor.argmax/argmin on one- and two-variable factors."""

    def max_test_0(self):
        """max() finds a planted maximum anywhere in a one-variable factor."""
        for pos, _ in enumerate(self.X.domain):
            fac = Factor(self.X, [1, 2])
            fac.values[pos] = 10
            assert fac.max() == 10

    def max_test_1(self):
        """max() finds a planted maximum anywhere in a two-variable factor."""
        for xi, _ in enumerate(self.X.domain):
            for yi, _ in enumerate(self.Y.domain):
                fac = Factor([self.X, self.Y], [1, 2, 3, 4])
                fac.values[xi + 2 * yi] = 10
                assert fac.max() == 10

    def min_test_2(self):
        """min() finds a planted minimum anywhere in a one-variable factor."""
        for pos, _ in enumerate(self.X.domain):
            fac = Factor(self.X, [1, 2])
            fac.values[pos] = -10
            assert fac.min() == -10

    def min_test_3(self):
        """min() finds a planted minimum anywhere in a two-variable factor."""
        for xi, _ in enumerate(self.X.domain):
            for yi, _ in enumerate(self.Y.domain):
                fac = Factor([self.X, self.Y], [1, 2, 3, 4])
                fac.values[xi + 2 * yi] = -10
                assert fac.min() == -10

    def argmax_test_4(self):
        """argmax() reports the event that holds the planted maximum (one variable)."""
        for pos, xv in enumerate(self.X.domain):
            fac = Factor(self.X, [1, 2])
            fac.values[pos] = 10
            expected = Event([(self.X, xv)])
            assert fac.argmax() == expected

    def argmax_test_5(self):
        """argmax() reports the event that holds the planted maximum (two variables)."""
        for xi, xv in enumerate(self.X.domain):
            for yi, yv in enumerate(self.Y.domain):
                fac = Factor([self.X, self.Y], [1, 2, 3, 4])
                fac.values[xi + 2 * yi] = 10
                expected = Event([(self.X, xv), (self.Y, yv)])
                assert fac.argmax() == expected

    def argmin_test_6(self):
        """argmin() reports the event that holds the planted minimum (one variable)."""
        for pos, xv in enumerate(self.X.domain):
            fac = Factor(self.X, [1, 2])
            fac.values[pos] = -10
            expected = Event([(self.X, xv)])
            assert fac.argmin() == expected

    def argmin_test_7(self):
        """argmin() reports the event that holds the planted minimum (two variables)."""
        for xi, xv in enumerate(self.X.domain):
            for yi, yv in enumerate(self.Y.domain):
                fac = Factor([self.X, self.Y], [1, 2, 3, 4])
                fac.values[xi + 2 * yi] = -10
                expected = Event([(self.X, xv), (self.Y, yv)])
                assert fac.argmin() == expected
| 27.135922
| 69
| 0.494454
| 355
| 2,795
| 3.842254
| 0.143662
| 0.073314
| 0.131965
| 0.093842
| 0.769795
| 0.749267
| 0.725806
| 0.725806
| 0.725806
| 0.725806
| 0
| 0.034227
| 0.372809
| 2,795
| 102
| 70
| 27.401961
| 0.743868
| 0.088372
| 0
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.153846
| false
| 0
| 0.057692
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b03e94828f73db79f8d3fd228f54b760469f5c15
| 35
|
py
|
Python
|
lime-transport-websocket/src/lime_transport_websocket/__init__.py
|
mirlarof/lime-python-transports
|
992a8cff44e4a3a2156514c5da0077d11653248b
|
[
"MIT"
] | null | null | null |
lime-transport-websocket/src/lime_transport_websocket/__init__.py
|
mirlarof/lime-python-transports
|
992a8cff44e4a3a2156514c5da0077d11653248b
|
[
"MIT"
] | 1
|
2021-06-30T21:47:08.000Z
|
2021-06-30T21:47:08.000Z
|
lime-transport-websocket/src/lime_transport_websocket/__init__.py
|
mirlarof/lime-python-transports
|
992a8cff44e4a3a2156514c5da0077d11653248b
|
[
"MIT"
] | 1
|
2021-12-30T12:55:56.000Z
|
2021-12-30T12:55:56.000Z
|
from .websocket_transport import *
| 17.5
| 34
| 0.828571
| 4
| 35
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b04ef9ef1aced43c7ae6fd2c803e7f39278542fe
| 69
|
py
|
Python
|
main.py
|
twtg93/tupl
|
2c1275e0c9af526f06267c7be64df009e3174dc0
|
[
"MIT"
] | null | null | null |
main.py
|
twtg93/tupl
|
2c1275e0c9af526f06267c7be64df009e3174dc0
|
[
"MIT"
] | null | null | null |
main.py
|
twtg93/tupl
|
2c1275e0c9af526f06267c7be64df009e3174dc0
|
[
"MIT"
] | null | null | null |
import os
# Run the two demo scripts in sequence with whatever `python` is on PATH.
# NOTE(review): os.system goes through the shell; subprocess.run with an
# argument list (and sys.executable) would be the safer modern equivalent —
# confirm the scripts don't rely on shell behaviour before changing this.
os.system('python basic.py')
os.system('python shell.py')
| 13.8
| 28
| 0.724638
| 12
| 69
| 4.166667
| 0.583333
| 0.32
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 69
| 4
| 29
| 17.25
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c68dde390c63fcb58df33a97210dce62214082f0
| 3,911
|
py
|
Python
|
p011.py
|
drcsturm/project-euler
|
07c4e6593f14eed039e580009d5cd5be5f541dfb
|
[
"MIT"
] | null | null | null |
p011.py
|
drcsturm/project-euler
|
07c4e6593f14eed039e580009d5cd5be5f541dfb
|
[
"MIT"
] | null | null | null |
p011.py
|
drcsturm/project-euler
|
07c4e6593f14eed039e580009d5cd5be5f541dfb
|
[
"MIT"
] | null | null | null |
# In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
# 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
# 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
# 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
# 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
# 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
# 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
# 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
# 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
# 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
# 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
# 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
# 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
# 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
# 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
# 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
# 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
# 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
# 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
# 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
# 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
grid = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
import re
import numpy as np
adjacent_nums = 4  # window length: product spans this many adjacent grid cells (Project Euler 11)
data = [int(i) for i in re.findall("[0-9]+", grid)] # parse the grid literal into a flat list of integers
arr = np.array(data, dtype=np.int32).reshape(20,20) # put in a matrix of 20 by 20
def max_from_row(vec, n):
    """Return the maximum product of n adjacent entries of vec.

    vec may be any indexable numeric sequence (list or 1-D numpy array).
    Returns 0 when vec holds fewer than n entries (no complete window).
    """
    max_prod = 0
    # BUG FIX: the original iterated range(len(vec[:-n+1])); for n == 1 that
    # slice is vec[:0] (empty), so the function always returned 0 for n == 1.
    # Computing the window count arithmetically handles every n >= 1.
    for start in range(len(vec) - n + 1):
        prod = 1
        for offset in range(n):
            prod *= vec[start + offset]
        max_prod = max(max_prod, prod)
    return max_prod
def max_from_array(arr):
    """Return the largest product of `adjacent_nums` adjacent entries within any single row of arr."""
    per_row = (max_from_row(row, adjacent_nums) for row in arr)
    return max(per_row, default=0)
def max_from_array_diag(arr):
    """Return the largest product of `adjacent_nums` adjacent entries along any down-right diagonal of arr."""
    # Only diagonals long enough to hold a full window need inspecting.
    span = len(arr) - adjacent_nums
    diag_maxima = (max_from_row(arr.diagonal(offset), adjacent_nums)
                   for offset in range(-span, span + 1))
    return max(diag_maxima, default=0)
# Collect the best product for each of the four scan directions.
matrix_maxes = []
# Left or Right: best product within the rows as given
matrix_maxes.append(max_from_array(arr))
# Up or Down: rotate 90 degrees so columns become rows
matrix_maxes.append(max_from_array(np.rot90(arr)))
# Down diagonals
matrix_maxes.append(max_from_array_diag(arr))
# Up diagonals: mirror left-right so anti-diagonals become main diagonals
matrix_maxes.append(max_from_array_diag(np.fliplr(arr)))
# print(matrix_maxes)
print(max(matrix_maxes))
| 39.11
| 135
| 0.661723
| 1,060
| 3,911
| 2.404717
| 0.164151
| 0.032954
| 0.028246
| 0.031385
| 0.77756
| 0.752452
| 0.718713
| 0.681836
| 0.627697
| 0.627697
| 0
| 0.592219
| 0.290207
| 3,911
| 99
| 136
| 39.505051
| 0.324207
| 0.419074
| 0
| 0.115385
| 0
| 0
| 0.540771
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.038462
| 0
| 0.153846
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c6b527a710ad330e1fb9491a54ac0f7ca624fa63
| 107
|
py
|
Python
|
python/desc/skycatalogs/__init__.py
|
LSSTDESC/skyCatalogs
|
39807b6fb510e45d7db79cf903e2eaa59befa81b
|
[
"BSD-3-Clause"
] | 1
|
2021-12-20T01:51:00.000Z
|
2021-12-20T01:51:00.000Z
|
python/desc/skycatalogs/__init__.py
|
LSSTDESC/skyCatalogs
|
39807b6fb510e45d7db79cf903e2eaa59befa81b
|
[
"BSD-3-Clause"
] | 3
|
2021-11-09T20:20:31.000Z
|
2022-01-20T20:23:21.000Z
|
python/desc/skycatalogs/__init__.py
|
LSSTDESC/skyCatalogs
|
39807b6fb510e45d7db79cf903e2eaa59befa81b
|
[
"BSD-3-Clause"
] | null | null | null |
from ._version import *
from .skyCatalogs import *
from .translate import *
from .catalog_creator import *
| 21.4
| 30
| 0.775701
| 13
| 107
| 6.230769
| 0.538462
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149533
| 107
| 4
| 31
| 26.75
| 0.89011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c6cea6970e48448932656800af59cc6740f23e75
| 26
|
py
|
Python
|
run.py
|
wiky-avis/journeys
|
7fc8a7045495e2ab4c16e7ab04681e8f7b9d14f2
|
[
"MIT"
] | null | null | null |
run.py
|
wiky-avis/journeys
|
7fc8a7045495e2ab4c16e7ab04681e8f7b9d14f2
|
[
"MIT"
] | null | null | null |
run.py
|
wiky-avis/journeys
|
7fc8a7045495e2ab4c16e7ab04681e8f7b9d14f2
|
[
"MIT"
] | null | null | null |
# Script entry point: import the application package and start its server.
# NOTE(review): `app.app` is presumably a Flask/WSGI application object
# exposing .run() -- confirm against the `app` package.
import app
app.app.run()
| 6.5
| 13
| 0.692308
| 5
| 26
| 3.6
| 0.6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 3
| 14
| 8.666667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c6d82b3e2cea0215b7e2a5b46bde59390dc6a102
| 21,728
|
py
|
Python
|
lib/datasets/loader/offset_loader.py
|
shampooma/openseg.pytorch
|
d1da408a1e870d52c058c359583bc098f7f3d9e2
|
[
"MIT"
] | 1,069
|
2019-01-21T04:32:05.000Z
|
2022-03-30T12:07:36.000Z
|
lib/datasets/loader/offset_loader.py
|
shampooma/openseg.pytorch
|
d1da408a1e870d52c058c359583bc098f7f3d9e2
|
[
"MIT"
] | 88
|
2019-02-13T03:43:09.000Z
|
2022-03-27T08:23:29.000Z
|
lib/datasets/loader/offset_loader.py
|
shampooma/openseg.pytorch
|
d1da408a1e870d52c058c359583bc098f7f3d9e2
|
[
"MIT"
] | 124
|
2019-01-23T01:46:00.000Z
|
2022-03-26T14:07:23.000Z
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: JingyiXie, RainbowSecret
## Microsoft Research
## yuyua@microsoft.com
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import torch
import numpy as np
import scipy.io as io
from torch.utils import data
from lib.utils.helpers.image_helper import ImageHelper
from lib.extensions.parallel.data_container import DataContainer
from lib.utils.tools.logger import Logger as Log
from lib.utils.helpers.offset_helper import DTOffsetHelper
class DTOffsetLoader(data.Dataset):
    """Dataset yielding (image, labelmap, distance map, angle map, meta, name).

    Offsets come from .mat files produced by a distance transform: key
    'depth' holds the distance map and 'dir_deg' a quantized direction that
    is rescaled by 'deg_reduce' back to degrees in [-180, 180].
    """

    def __init__(self, root_dir, aug_transform=None, dataset=None,
                 img_transform=None, label_transform=None, configer=None):
        self.configer = configer
        self.aug_transform = aug_transform
        self.img_transform = img_transform
        self.label_transform = label_transform
        self.img_list, self.label_list, self.offset_list, self.name_list = self.__list_dirs(root_dir, dataset)
        self.root_dir = root_dir
        self.dataset = dataset
        # Samples can only be stacked into one batch tensor when every
        # sample shares a single size ('diverse_size' disables stacking).
        size_mode = self.configer.get(self.dataset, 'data_transformer')['size_mode']
        self.is_stack = size_mode != 'diverse_size'

    def __len__(self):
        return len(self.img_list)

    def _load_maps(self, filename, labelmap):
        """Return (distance_map, dir_deg) read from an offset .mat file.

        `labelmap` is unused; it is kept for call-site compatibility.
        """
        dct = self._load_mat(filename)
        distance_map = dct['depth'].astype(np.int32)
        # FIX: np.float was removed in NumPy 1.24; the builtin float is what
        # the alias always meant (float64).
        dir_deg = dct['dir_deg'].astype(float)  # in [0, 360 / deg_reduce]
        deg_reduce = dct['deg_reduce'][0][0]
        dir_deg = deg_reduce * dir_deg - 180  # in [-180, 180]
        return distance_map, dir_deg

    def load_boundary(self, fn):
        """Load a boundary mask from a .mat file or an image file.

        Returns a float32 map; image input is normalized from [0, 255] to
        [0, 1].
        """
        if fn.endswith('mat'):
            mat = io.loadmat(fn)
            if 'depth' in mat:
                # Distance-transform file: derive the boundary mask from
                # the distance map.
                dist_map, _ = self._load_maps(fn, None)
                boundary_map = DTOffsetHelper.distance_to_mask_label(dist_map, np.zeros_like(dist_map)).astype(np.float32)
            else:
                boundary_map = mat['mat'].transpose(1, 2, 0)
        else:
            boundary_map = ImageHelper.read_image(fn,
                    tool=self.configer.get('data', 'image_tool'), mode='P')
            boundary_map = boundary_map.astype(np.float32) / 255
        return boundary_map

    def __getitem__(self, index):
        """Return one sample as a dict of DataContainers."""
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)
        labelmap = ImageHelper.read_image(self.label_list[index],
                                          tool=self.configer.get('data', 'image_tool'), mode='P')
        if self.configer.exists('data', 'label_list'):
            labelmap = self._encode_label(labelmap)
        distance_map, angle_map = self._load_maps(self.offset_list[index], labelmap)
        if self.configer.exists('data', 'reduce_zero_label') and self.configer.get('data', 'reduce_zero_label'):
            labelmap = self._reduce_zero_label(labelmap)
        # FIX: np.int was removed in NumPy 1.24; the builtin int matches the
        # alias' behavior.
        ori_target = ImageHelper.tonp(labelmap).astype(int)
        ori_target[ori_target == 255] = -1  # 255 marks the 'ignore' label
        ori_distance_map = np.array(distance_map)
        ori_angle_map = np.array(angle_map)
        if self.aug_transform is not None:
            img, labelmap, distance_map, angle_map = self.aug_transform(img, labelmap=labelmap, distance_map=distance_map, angle_map=angle_map)
        border_size = ImageHelper.get_size(img)
        if self.img_transform is not None:
            img = self.img_transform(img)
        if self.label_transform is not None:
            labelmap = self.label_transform(labelmap)
        distance_map = torch.from_numpy(distance_map)
        angle_map = torch.from_numpy(angle_map)
        if set(self.configer.get('val_trans', 'trans_seq')) & set(['random_crop', 'crop']):
            # Cropped validation: the 'original' targets must match the crop.
            ori_target = labelmap.numpy()
            ori_distance_map = distance_map.numpy()
            ori_angle_map = angle_map.numpy()
            img_size = ori_target.shape[:2][::-1]
        meta = dict(
            ori_img_size=img_size,
            border_size=border_size,
            ori_target=ori_target,
            ori_distance_map=ori_distance_map,
            ori_angle_map=ori_angle_map,
            basename=os.path.basename(self.label_list[index])
        )
        return dict(
            img=DataContainer(img, stack=self.is_stack),
            labelmap=DataContainer(labelmap, stack=self.is_stack),
            distance_map=DataContainer(distance_map, stack=self.is_stack),
            angle_map=DataContainer(angle_map, stack=self.is_stack),
            meta=DataContainer(meta, stack=False, cpu_only=True),
            name=DataContainer(self.name_list[index], stack=False, cpu_only=True),
        )

    def _load_mat(self, filename):
        """Load a MATLAB .mat file as a dict of arrays."""
        return io.loadmat(filename)

    def _replace_ext(self, filename, ext):
        """Swap the extension of `filename` for `ext`."""
        return '.'.join([filename.rpartition('.')[0], ext])

    def _reduce_zero_label(self, labelmap):
        """Shift labels down by one so class 0 becomes 'ignore' (wraps to 255 on uint8)."""
        if not self.configer.get('data', 'reduce_zero_label'):
            return labelmap
        labelmap = np.array(labelmap)
        encoded_labelmap = labelmap - 1
        if self.configer.get('data', 'image_tool') == 'pil':
            encoded_labelmap = ImageHelper.np2img(encoded_labelmap.astype(np.uint8))
        return encoded_labelmap

    def _encode_label(self, labelmap):
        """Map raw label ids onto contiguous indices; unmatched pixels get 255."""
        labelmap = np.array(labelmap)
        shape = labelmap.shape
        encoded_labelmap = np.ones(shape=(shape[0], shape[1]), dtype=np.float32) * 255
        for i in range(len(self.configer.get('data', 'label_list'))):
            class_id = self.configer.get('data', 'label_list')[i]
            encoded_labelmap[labelmap == class_id] = i
        if self.configer.get('data', 'image_tool') == 'pil':
            encoded_labelmap = ImageHelper.np2img(encoded_labelmap.astype(np.uint8))
        return encoded_labelmap

    def _collect_split(self, image_dir, label_dir, offset_dir, files, img_extension,
                       img_list, label_list, offset_list, name_list):
        """Append every entry of `files` whose image and label files both exist."""
        for file_name in files:
            image_name = '.'.join(file_name.split('.')[:-1])
            img_path = os.path.join(image_dir, '{}.{}'.format(image_name, img_extension))
            label_path = os.path.join(label_dir, file_name)
            offset_path = os.path.join(offset_dir, self._replace_ext(file_name, 'mat'))
            if not os.path.exists(label_path) or not os.path.exists(img_path):
                Log.error('Label Path: {} not exists.'.format(label_path))
                continue
            img_list.append(img_path)
            label_list.append(label_path)
            offset_list.append(offset_path)
            name_list.append(image_name)

    def __list_dirs(self, root_dir, dataset):
        """Build parallel path lists for the split, honoring env-var overrides."""
        if os.environ.get('use_cityscapes_style'):
            if 'GTA5_small' in root_dir:
                root_dir = root_dir.replace('GTA5_small', 'GTA5_Cityscapes')
            else:
                root_dir = root_dir.replace('GTA5', 'GTA5_Cityscapes')
            Log.info_once('Using Cityscapes style, switch to {}'.format(root_dir))
        else:
            Log.info_once('Using default root dir: {}'.format(root_dir))
        img_list = list()
        label_list = list()
        offset_list = list()
        name_list = list()
        image_subdir = os.environ.get('image_subdir', 'image')
        label_subdir = os.environ.get('label_dir', 'label')
        Log.info_once('Using label dir: {}'.format(label_subdir))
        offset_subdir = os.environ.get('offset_dir', 'dt_offset')
        Log.info_once('Using distance transform based offset: {}'.format(offset_subdir))
        image_dir = os.path.join(root_dir, dataset, image_subdir)
        label_dir = os.path.join(root_dir, dataset, label_subdir)
        offset_dir = os.path.join(root_dir, dataset, offset_subdir)
        # Infer the image extension from the first file in the image dir.
        img_extension = os.listdir(image_dir)[0].split('.')[-1]
        file_list_txt = os.environ.get('use_file_list')
        if file_list_txt is None:
            Log.info_once('Using file list: all')
            files = sorted(os.listdir(label_dir))
        else:
            Log.info_once('Using file list: {}'.format(file_list_txt))
            with open(os.path.join(root_dir, dataset, 'file_list', file_list_txt)) as f:
                files = [x.strip() for x in f]
        if os.environ.get('chunk'):
            # 'chunk' = '<n>_<i>': keep the i-th of n roughly equal slices.
            n, i = map(int, os.environ.get('chunk').split('_'))
            step = len(files) // n + 4
            files = files[step * i: step * (i + 1)]
        self._collect_split(image_dir, label_dir, offset_dir, files, img_extension,
                            img_list, label_list, offset_list, name_list)
        if dataset == 'train' and self.configer.get('data', 'include_val'):
            Log.info_once('Include val set for training ...')
            image_dir = os.path.join(root_dir, 'val', image_subdir)
            label_dir = os.path.join(root_dir, 'val', label_subdir)
            offset_dir = os.path.join(root_dir, 'val', offset_subdir)
            if file_list_txt is None:
                files = sorted(os.listdir(label_dir))
            else:
                with open(os.path.join(root_dir, 'val', 'file_list', file_list_txt)) as f:
                    files = [x.strip() for x in f]
            self._collect_split(image_dir, label_dir, offset_dir, files, img_extension,
                                img_list, label_list, offset_list, name_list)
        return img_list, label_list, offset_list, name_list
class SWOffsetLoader(data.Dataset):
    """Dataset yielding (image, labelmap, offset_h, offset_w, meta, name).

    Offsets are stored per sample as two .mat files (horizontal and
    vertical components) under <root>/<split>/<offset_type>/{h,w}/.
    """

    def __init__(self, root_dir, aug_transform=None, dataset=None,
                 img_transform=None, label_transform=None, configer=None):
        self.configer = configer
        self.aug_transform = aug_transform
        self.img_transform = img_transform
        self.label_transform = label_transform
        self.img_list, self.label_list, self.offset_h_list, self.offset_w_list, self.name_list = self.__list_dirs(root_dir, dataset)
        self.root_dir = root_dir
        self.dataset = dataset
        # Samples can only be stacked when they all share one size.
        size_mode = self.configer.get(dataset, 'data_transformer')['size_mode']
        self.is_stack = size_mode != 'diverse_size'

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        """Return one sample as a dict of DataContainers."""
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)
        labelmap = ImageHelper.read_image(self.label_list[index],
                                          tool=self.configer.get('data', 'image_tool'), mode='P')
        offsetmap_h = self._load_mat(self.offset_h_list[index])
        offsetmap_w = self._load_mat(self.offset_w_list[index])
        if os.environ.get('train_no_offset') and self.dataset == 'train':
            # Ablation switch: train without offset supervision.
            offsetmap_h = np.zeros_like(offsetmap_h)
            offsetmap_w = np.zeros_like(offsetmap_w)
        if self.configer.exists('data', 'label_list'):
            labelmap = self._encode_label(labelmap)
        if self.configer.exists('data', 'reduce_zero_label') and self.configer.get('data', 'reduce_zero_label'):
            labelmap = self._reduce_zero_label(labelmap)
        # FIX: np.int was removed in NumPy 1.24; the builtin int matches the
        # alias' behavior.
        ori_target = ImageHelper.tonp(labelmap).astype(int)
        ori_target[ori_target == 255] = -1  # 255 marks the 'ignore' label
        ori_offset_h = np.array(offsetmap_h)
        ori_offset_w = np.array(offsetmap_w)
        if self.aug_transform is not None:
            img, labelmap, offsetmap_h, offsetmap_w = self.aug_transform(img, labelmap=labelmap, offset_h_map=offsetmap_h, offset_w_map=offsetmap_w)
        border_size = ImageHelper.get_size(img)
        if self.img_transform is not None:
            img = self.img_transform(img)
        if self.label_transform is not None:
            labelmap = self.label_transform(labelmap)
        offsetmap_h = torch.from_numpy(np.array(offsetmap_h)).long()
        offsetmap_w = torch.from_numpy(np.array(offsetmap_w)).long()
        meta = dict(
            ori_img_size=img_size,
            border_size=border_size,
            ori_target=ori_target,
            ori_offset_h=ori_offset_h,
            ori_offset_w=ori_offset_w,
        )
        return dict(
            img=DataContainer(img, stack=self.is_stack),
            labelmap=DataContainer(labelmap, stack=self.is_stack),
            offsetmap_h=DataContainer(offsetmap_h, stack=self.is_stack),
            offsetmap_w=DataContainer(offsetmap_w, stack=self.is_stack),
            meta=DataContainer(meta, stack=False, cpu_only=True),
            name=DataContainer(self.name_list[index], stack=False, cpu_only=True),
        )

    def _load_mat(self, filename):
        """Load the 'mat' array from a MATLAB .mat file."""
        return io.loadmat(filename)['mat']

    def _replace_ext(self, filename, ext):
        """Swap the extension of `filename` for `ext`."""
        return '.'.join([filename.rpartition('.')[0], ext])

    def _reduce_zero_label(self, labelmap):
        """Shift labels down by one so class 0 becomes 'ignore' (wraps to 255 on uint8)."""
        if not self.configer.get('data', 'reduce_zero_label'):
            return labelmap
        labelmap = np.array(labelmap)
        encoded_labelmap = labelmap - 1
        if self.configer.get('data', 'image_tool') == 'pil':
            encoded_labelmap = ImageHelper.np2img(encoded_labelmap.astype(np.uint8))
        return encoded_labelmap

    def _encode_label(self, labelmap):
        """Map raw label ids onto contiguous indices; unmatched pixels get 255."""
        labelmap = np.array(labelmap)
        shape = labelmap.shape
        encoded_labelmap = np.ones(shape=(shape[0], shape[1]), dtype=np.float32) * 255
        for i in range(len(self.configer.get('data', 'label_list'))):
            class_id = self.configer.get('data', 'label_list')[i]
            encoded_labelmap[labelmap == class_id] = i
        if self.configer.get('data', 'image_tool') == 'pil':
            encoded_labelmap = ImageHelper.np2img(encoded_labelmap.astype(np.uint8))
        return encoded_labelmap

    def _offset_dirs(self, root_dir, split):
        """Resolve (offset_h_dir, offset_w_dir), preferring the env override."""
        subdir = os.environ.get('offset_dir')
        if subdir is not None:
            Log.info_once('Using offset dir: {}'.format(subdir))
        else:
            subdir = self.configer.get('data', 'offset_type')
            assert subdir is not None
        return (os.path.join(root_dir, split, subdir, 'h'),
                os.path.join(root_dir, split, subdir, 'w'))

    def _collect_split(self, image_dir, label_dir, offset_h_dir, offset_w_dir, img_extension,
                       img_list, label_list, offset_h_list, offset_w_list, name_list):
        """Append every label file whose image and label files both exist."""
        for file_name in os.listdir(label_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            img_path = os.path.join(image_dir, '{}.{}'.format(image_name, img_extension))
            label_path = os.path.join(label_dir, file_name)
            offset_h_path = os.path.join(offset_h_dir, self._replace_ext(file_name, 'mat'))
            offset_w_path = os.path.join(offset_w_dir, self._replace_ext(file_name, 'mat'))
            if not os.path.exists(label_path) or not os.path.exists(img_path):
                Log.error('Label Path: {} not exists.'.format(label_path))
                continue
            img_list.append(img_path)
            label_list.append(label_path)
            offset_h_list.append(offset_h_path)
            offset_w_list.append(offset_w_path)
            name_list.append(image_name)

    def __list_dirs(self, root_dir, dataset):
        """Build parallel (image, label, offset_h, offset_w, name) path lists."""
        img_list = list()
        label_list = list()
        offset_h_list = list()
        offset_w_list = list()
        name_list = list()
        image_dir = os.path.join(root_dir, dataset, 'image')
        label_dir = os.path.join(root_dir, dataset, 'label')
        offset_h_dir, offset_w_dir = self._offset_dirs(root_dir, dataset)
        # Infer the image extension from the first file in the image dir.
        img_extension = os.listdir(image_dir)[0].split('.')[-1]
        self._collect_split(image_dir, label_dir, offset_h_dir, offset_w_dir, img_extension,
                            img_list, label_list, offset_h_list, offset_w_list, name_list)
        if dataset == 'train' and self.configer.get('data', 'include_val'):
            image_dir = os.path.join(root_dir, 'val/image')
            label_dir = os.path.join(root_dir, 'val/label')
            offset_h_dir, offset_w_dir = self._offset_dirs(root_dir, 'val')
            self._collect_split(image_dir, label_dir, offset_h_dir, offset_w_dir, img_extension,
                                img_list, label_list, offset_h_list, offset_w_list, name_list)
        return img_list, label_list, offset_h_list, offset_w_list, name_list
class SWOffsetTestLoader(data.Dataset):
    """Test-time dataset yielding (image, offset_h, offset_w, meta, name)."""

    def __init__(self, root_dir, dataset='val', img_transform=None, configer=None):
        self.configer = configer
        self.img_transform = img_transform
        self.img_list, self.offset_h_list, self.offset_w_list, self.name_list = self.__list_dirs(root_dir, dataset)
        # Samples can only be stacked when they all share one size.
        size_mode = self.configer.get(dataset, 'data_transformer')['size_mode']
        self.is_stack = (size_mode != 'diverse_size')

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        """Return one sample as a dict of DataContainers (no labels)."""
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        offsetmap_h = self._load_mat(self.offset_h_list[index])
        offsetmap_w = self._load_mat(self.offset_w_list[index])
        img_size = ImageHelper.get_size(img)
        if self.img_transform is not None:
            img = self.img_transform(img)
        meta = dict(
            ori_img_size=img_size,
            border_size=img_size,
        )
        return dict(
            img=DataContainer(img, stack=self.is_stack),
            offsetmap_h=DataContainer(offsetmap_h, stack=self.is_stack),
            offsetmap_w=DataContainer(offsetmap_w, stack=self.is_stack),
            meta=DataContainer(meta, stack=False, cpu_only=True),
            name=DataContainer(self.name_list[index], stack=False, cpu_only=True),
        )

    def _load_mat(self, filename):
        """Load the 'mat' array from a MATLAB .mat file."""
        return io.loadmat(filename)['mat']

    def _replace_ext(self, filename, ext):
        """Swap the extension of `filename` for `ext`."""
        return '.'.join([filename.rpartition('.')[0], ext])

    def __list_dirs(self, root_dir, dataset):
        """Build parallel (image, offset_h, offset_w, name) path lists.

        BUG FIX: the original iterated over an undefined `label_dir` and
        checked an undefined `label_path` (NameError when called); the test
        split has no labels, so enumerate the image directory instead.
        """
        img_list = list()
        offset_h_list = list()
        offset_w_list = list()
        name_list = list()
        image_dir = os.path.join(root_dir, dataset, 'image')
        offset_type = self.configer.get('data', 'offset_type')
        assert offset_type is not None
        offset_h_dir = os.path.join(root_dir, dataset, offset_type, 'h')
        offset_w_dir = os.path.join(root_dir, dataset, offset_type, 'w')
        for file_name in os.listdir(image_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            img_path = os.path.join(image_dir, file_name)
            offset_h_path = os.path.join(offset_h_dir, self._replace_ext(file_name, 'mat'))
            offset_w_path = os.path.join(offset_w_dir, self._replace_ext(file_name, 'mat'))
            if not os.path.exists(img_path):
                Log.error('Image Path: {} not exists.'.format(img_path))
                continue
            img_list.append(img_path)
            offset_h_list.append(offset_h_path)
            offset_w_list.append(offset_w_path)
            name_list.append(image_name)
        return img_list, offset_h_list, offset_w_list, name_list
def load_mat(filename):
    """Return the array stored under key 'mat' in a MATLAB .mat file."""
    contents = io.loadmat(filename)
    return contents['mat']
def replace_ext(filename, ext):
    """Replace the final extension of `filename` with `ext`."""
    stem = filename.rpartition('.')[0]
    return '{}.{}'.format(stem, ext)
if __name__ == "__main__":
    # Library module: nothing to do when executed directly.
    pass
| 42.856016
| 148
| 0.615565
| 2,812
| 21,728
| 4.467283
| 0.076102
| 0.024359
| 0.031842
| 0.039325
| 0.802977
| 0.793504
| 0.759752
| 0.744706
| 0.731173
| 0.684684
| 0
| 0.005481
| 0.261046
| 21,728
| 507
| 149
| 42.856016
| 0.776906
| 0.027016
| 0
| 0.699746
| 0
| 0
| 0.066101
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 1
| 0.066158
| false
| 0.002545
| 0.033079
| 0.02799
| 0.170483
| 0.002545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
af16340c1ed1ba8ab8685932d8db0fa3c3847f4a
| 47
|
py
|
Python
|
rydprop/hohi/__init__.py
|
jdrtommey/rydprops
|
cdc7e14d61ff33929844ee5d779a18fd64f89f4f
|
[
"MIT"
] | null | null | null |
rydprop/hohi/__init__.py
|
jdrtommey/rydprops
|
cdc7e14d61ff33929844ee5d779a18fd64f89f4f
|
[
"MIT"
] | null | null | null |
rydprop/hohi/__init__.py
|
jdrtommey/rydprops
|
cdc7e14d61ff33929844ee5d779a18fd64f89f4f
|
[
"MIT"
] | null | null | null |
from .adiabatic_solver import Adiabatic_solver
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.666667
| 0.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
af771e76560ad0e368223c5eacd2d55b98e4ba77
| 23
|
py
|
Python
|
pyunirpc/__init__.py
|
aivclab/pyunirpc
|
1ff8da1a6a8aef0df1aa5486bdf471851ecb4647
|
[
"MIT"
] | 2
|
2020-11-17T07:43:47.000Z
|
2020-11-17T08:27:27.000Z
|
pyunirpc/__init__.py
|
aivclab/pyunirpc
|
1ff8da1a6a8aef0df1aa5486bdf471851ecb4647
|
[
"MIT"
] | null | null | null |
pyunirpc/__init__.py
|
aivclab/pyunirpc
|
1ff8da1a6a8aef0df1aa5486bdf471851ecb4647
|
[
"MIT"
] | null | null | null |
from .pyunirpc import *
| 23
| 23
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
afa5da4c2cc0e58a1ddcb101bee077040b12234b
| 54
|
py
|
Python
|
textsemantics/__init__.py
|
PrimozGodec/text-semantics
|
194b0bce7adcc8937a30643959681f0b175927ab
|
[
"MIT"
] | 11
|
2021-01-27T07:43:33.000Z
|
2021-12-18T11:58:00.000Z
|
textsemantics/__init__.py
|
PrimozGodec/text-semantics
|
194b0bce7adcc8937a30643959681f0b175927ab
|
[
"MIT"
] | 32
|
2020-11-24T12:42:46.000Z
|
2021-12-06T12:01:22.000Z
|
textsemantics/__init__.py
|
PrimozGodec/text-semantics
|
194b0bce7adcc8937a30643959681f0b175927ab
|
[
"MIT"
] | 3
|
2020-11-10T15:29:16.000Z
|
2020-11-28T11:42:52.000Z
|
from .ontology_api import *
from .server_api import *
| 18
| 27
| 0.777778
| 8
| 54
| 5
| 0.625
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 2
| 28
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
afc6af331b84f10911f8624f50bda8a9f290eb70
| 6,737
|
py
|
Python
|
morse-stf/unittest/test_selectshare.py
|
alipay/Antchain-MPC
|
f6916465e1da5722ca7efadc4eeaca13ec229707
|
[
"Apache-2.0"
] | 33
|
2021-11-23T09:04:03.000Z
|
2022-03-14T07:56:31.000Z
|
morse-stf/unittest/test_selectshare.py
|
qizhi-zhang/Antchain-MPC
|
f551170f68b0baff328e6594484e9832230fe719
|
[
"Apache-2.0"
] | null | null | null |
morse-stf/unittest/test_selectshare.py
|
qizhi-zhang/Antchain-MPC
|
f551170f68b0baff328e6594484e9832230fe719
|
[
"Apache-2.0"
] | 6
|
2021-11-25T12:38:41.000Z
|
2022-02-23T03:29:51.000Z
|
import unittest
import numpy as np
from stensorflow.basic.basic_class.pair import SharedTensorBase,SharedPairBase
from stensorflow.basic.basic_class.private import PrivateTensor
import tensorflow as tf
from stensorflow.basic.operator.selectshare import native_select, select_share
from stensorflow.global_var import StfConfig
from stensorflow.random.random import random_init
from stensorflow.engine.start_server import start_local_server
import os
start_local_server(os.path.join(os.environ.get("stf_home", ".."), "conf", "config.json"))
class MyTestCase(unittest.TestCase):
    """End-to-end checks for native_select/select_share on shared tensors.

    Each test builds random secret shares, runs a selection protocol, and
    verifies that the reconstructed result equals t * x element-wise (the
    difference tensor must contain only zeros).

    The four original tests were near-verbatim copies; shared setup lives
    in the `_random_*`, `_pair` and `_assert_selects` helpers.
    """

    # Shape of every random test tensor.
    _SIZE = [32, 8]

    def setUp(self):
        # Requires the local stf server started at import time (port 8887).
        self.sess = tf.compat.v1.Session("grpc://0.0.0.0:8887")

    def tearDown(self):
        self.sess.close()

    def _random_bit_share(self):
        """Random {0, 1} share over Z_2 (a selection-mask share)."""
        bits = np.random.randint(low=0, high=2, size=self._SIZE)
        return SharedTensorBase(inner_value=tf.constant(bits), module=2)

    def _random_value_share(self):
        """Random int64 value share spanning nearly the full 2^63 ring."""
        vals = np.random.randint(low=-(1 << 62), high=(1 << 62), size=self._SIZE)
        return SharedTensorBase(inner_value=tf.constant(vals, dtype='int64'))

    def _pair(self, xL, xR):
        """Wrap L/R shares into a SharedPairBase on the two workers."""
        return SharedPairBase(ownerL=StfConfig.workerL[0], ownerR=StfConfig.workerR[0],
                              xL=xL, xR=xR, fixedpoint=0)

    def _assert_selects(self, t, x, tx):
        """Assert that tx reconstructs to t * x element-wise."""
        z = tx.to_tf_tensor("R") - t.to_tf_tensor("R") * x.to_tf_tensor("R")
        self.sess.run(random_init())
        self.assertEqual(np.count_nonzero(self.sess.run(z)), 0)

    def test_native_select(self):
        with tf.device(StfConfig.workerL[0]):
            tL = self._random_bit_share()
            xL = self._random_value_share()
        with tf.device(StfConfig.workerR[0]):
            tR = self._random_bit_share()
            xR = self._random_value_share()
        t = self._pair(tL, tR)
        x = self._pair(xL, xR)
        tx = native_select(t, x, prf_flag=True, compress_flag=True)
        self._assert_selects(t, x, tx)

    def test_select_using_pm1_act(self):
        with tf.device(StfConfig.workerL[0]):
            tL = self._random_bit_share()
            xL = self._random_value_share()
        with tf.device(StfConfig.workerR[0]):
            tR = self._random_bit_share()
            xR = self._random_value_share()
        t = self._pair(tL, tR)
        x = self._pair(xL, xR)
        tx = select_share(t, x, prf_flag=True, compress_flag=True)
        self._assert_selects(t, x, tx)

    def test_select_Private_Private_share(self):
        with tf.device(StfConfig.workerL[0]):
            bits = np.random.randint(low=0, high=2, size=self._SIZE)
            tL = PrivateTensor(inner_value=tf.constant(bits), module=2,
                               fixedpoint=0, owner=StfConfig.workerL[0])
        with tf.device(StfConfig.workerR[0]):
            vals = np.random.randint(low=-(1 << 62), high=(1 << 62), size=self._SIZE)
            xR = PrivateTensor(inner_value=tf.constant(vals, dtype='int64'),
                               owner=StfConfig.workerR[0])
        tx = select_share(tL, xR, prf_flag=True, compress_flag=True, )
        self._assert_selects(tL, xR, tx)

    def test_select_Private_SharedPair_share(self):
        with tf.device(StfConfig.workerL[0]):
            bits = np.random.randint(low=0, high=2, size=self._SIZE)
            tL = PrivateTensor(inner_value=tf.constant(bits), module=2,
                               fixedpoint=0, owner=StfConfig.workerL[0])
            xL = self._random_value_share()
        with tf.device(StfConfig.workerR[0]):
            xR = self._random_value_share()
        x = self._pair(xL, xR)
        tx = select_share(tL, x, prf_flag=True, compress_flag=True, )
        self._assert_selects(tL, x, tx)

    def test_select_SharedPair_Private(self):
        with tf.device(StfConfig.workerL[0]):
            tL = self._random_bit_share()
        with tf.device(StfConfig.workerR[0]):
            tR = self._random_bit_share()
            vals = np.random.randint(low=-(1 << 62), high=(1 << 62), size=self._SIZE)
            xR = PrivateTensor(inner_value=tf.constant(vals, dtype='int64'),
                               fixedpoint=0, owner=StfConfig.workerR[0])
        t = self._pair(tL, tR)
        tx = select_share(t, xR, prf_flag=False, compress_flag=False)
        self._assert_selects(t, xR, tx)
if __name__ == '__main__':
unittest.main()
| 43.746753
| 114
| 0.621345
| 971
| 6,737
| 4.197734
| 0.108136
| 0.035329
| 0.066241
| 0.07949
| 0.833415
| 0.814279
| 0.807164
| 0.807164
| 0.807164
| 0.807164
| 0
| 0.039757
| 0.219682
| 6,737
| 153
| 115
| 44.03268
| 0.735591
| 0.06917
| 0
| 0.691589
| 0
| 0
| 0.017087
| 0
| 0
| 0
| 0
| 0
| 0.046729
| 1
| 0.065421
| false
| 0
| 0.093458
| 0
| 0.168224
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bb96f0d6f73a39ea52fd9a3e0782e379f8770d43
| 44
|
py
|
Python
|
je_editor/ui/ui_event/execute/__init__.py
|
JE-Chen/je_editor
|
2f18dedb6f0eb27c38668dc53f520739c8d5c6c6
|
[
"MIT"
] | 1
|
2021-12-10T14:57:15.000Z
|
2021-12-10T14:57:15.000Z
|
je_editor/ui/ui_event/execute/__init__.py
|
JE-Chen/je_editor
|
2f18dedb6f0eb27c38668dc53f520739c8d5c6c6
|
[
"MIT"
] | null | null | null |
je_editor/ui/ui_event/execute/__init__.py
|
JE-Chen/je_editor
|
2f18dedb6f0eb27c38668dc53f520739c8d5c6c6
|
[
"MIT"
] | null | null | null |
from je_editor.ui.ui_event.execute import *
| 22
| 43
| 0.818182
| 8
| 44
| 4.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bbbf16874c8477aaa46c3a15a2c21672320619b7
| 3,481
|
py
|
Python
|
src/bitcoin_tag/models.py
|
RosaSineSpinis/twitter_bitcon_tag_analyser
|
3311022b6fd629ce85f0c4fa0516e310bed05d74
|
[
"bzip2-1.0.6"
] | null | null | null |
src/bitcoin_tag/models.py
|
RosaSineSpinis/twitter_bitcon_tag_analyser
|
3311022b6fd629ce85f0c4fa0516e310bed05d74
|
[
"bzip2-1.0.6"
] | null | null | null |
src/bitcoin_tag/models.py
|
RosaSineSpinis/twitter_bitcon_tag_analyser
|
3311022b6fd629ce85f0c4fa0516e310bed05d74
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.db import models
# Create your models here.
from picklefield.fields import PickledObjectField
from django.utils import timezone
# class YearModel(models.Model):
# tag_dictionary = PickledObjectField()
# tag_date = models.DateField(auto_now_add=False) # date of save
# tag_time = models.TimeField(auto_now_add=False) # time of save
# beginning_datetime = models.DateTimeField(blank=False) # comes from MonthModel
# ending_datetime = models.DateTimeField(blank=False) # comes from MonthModel
#
# def __str__(self):
# return f'{self.tag_date} {self.tag_time}'
def default_semantic_analysis_dict():
    """Fresh all-zero counter keyed by class 0, 1 and -1.

    Used as a callable field default so every model row gets its own dict.
    NOTE(review): keys are presumably sentiment classes (neutral/positive/
    negative) -- confirm against the analyser that fills them.
    """
    return dict.fromkeys((0, 1, -1), 0)
class MonthModel(models.Model):
    """Monthly aggregate of tag frequencies and their sentiment tallies."""
    tag_dictionary = PickledObjectField()  # pickled {tag: count} mapping
    semantic_analysis = PickledObjectField(default=default_semantic_analysis_dict)
    tag_date = models.DateField(default=timezone.now) # date of save
    tag_time = models.TimeField(default=timezone.now) # time of save
    tag_datetime = models.DateTimeField(default=timezone.now) #, blank=True, null=True)
    beginning_datetime = models.DateTimeField(blank=False) # comes from HourModel the earliest object
    ending_datetime = models.DateTimeField(blank=False) # comes from HourModel the latest object
    def __str__(self):
        return f'{self.tag_date} {self.tag_time}'
class DayModel(models.Model):
    """Daily aggregate of tag frequencies and their sentiment tallies."""
    tag_dictionary = PickledObjectField()  # pickled {tag: count} mapping
    semantic_analysis = PickledObjectField(default=default_semantic_analysis_dict)
    tag_date = models.DateField(auto_now_add=True)  # date of save
    tag_time = models.TimeField(auto_now_add=True)  # time of save
    # BUG FIX: the default was `False`, which is not a valid datetime value;
    # use timezone.now for consistency with MonthModel.
    tag_datetime = models.DateTimeField(default=timezone.now)  # , blank=True, null=True)
    beginning_datetime = models.DateTimeField(blank=False, null=True)  # comes from HourModel the earliest object
    ending_datetime = models.DateTimeField(blank=False, null=True)  # comes from HourModel the latest object

    def __str__(self):
        return f'{self.tag_date} {self.tag_time}'
class HourModel(models.Model):
    """Tag statistics collected for a single hour; the finest stored granularity.

    MonthModel/DayModel aggregates are built from these records (per their
    field comments).
    """
    tag_dictionary = PickledObjectField() # dictionary of tags --> {#tagname: number}
    semantic_analysis = PickledObjectField(default=default_semantic_analysis_dict)  # counts keyed by sentiment class 0/1/-1
    tag_date = models.DateField(auto_now_add=False) # date of tag save; set explicitly by the caller
    tag_time = models.TimeField(auto_now_add=False) # time of tag save; set explicitly by the caller
    tag_datetime = models.DateTimeField(auto_now_add=False)  # set explicitly by the caller (auto_now_add disabled)
    def __str__(self):
        return f'{self.tag_date} {self.tag_time}'
# class MinutesModel(models.Model):
# tag_dictionary = PickledObjectField()
# tag_date = models.DateField(auto_now=False, auto_now_add=False, blank=False)
# tag_time = models.TimeField(auto_now=False, auto_now_add=False)
#
# def __str__(self):
# return f'{self.tag_date} {self.tag_time}'
class Test(models.Model):
    """Scratch model apparently used for experimentation."""
    user_name = models.TextField(max_length=200, default="aaa")
    user_surname = models.CharField(max_length=200, default="missing")
    def __str__(self):
        # BUG FIX: __str__ must return a str; the original returned a tuple
        # (`return self.user_name, self.user_surname`), which raises
        # "TypeError: __str__ returned non-string" whenever the object is rendered.
        return f'{self.user_name} {self.user_surname}'
# class DayModel(models.Model):
# dictionary = models.CharField(max_length=30)
# date = models.DateField(auto_now=False, auto_now_add=False)
# time = models.TimeField(auto_now=False, auto_now_add=False)
#
#
# class MonthModel(models.Model):
# dictionary = models.CharField(max_length=30)
# date = models.DateField(auto_now=False, auto_now_add=False)
# time = models.TimeField(auto_now=False, auto_now_add=False)
| 40.011494
| 113
| 0.738294
| 453
| 3,481
| 5.423841
| 0.154525
| 0.054131
| 0.05291
| 0.067155
| 0.79243
| 0.761498
| 0.760277
| 0.747253
| 0.657306
| 0.657306
| 0
| 0.005461
| 0.158288
| 3,481
| 86
| 114
| 40.476744
| 0.833106
| 0.435507
| 0
| 0.342105
| 0
| 0
| 0.053674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131579
| false
| 0
| 0.078947
| 0.131579
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
bbc7ecfc8c29f021d9863e8b84c09584d8b3a62f
| 33
|
py
|
Python
|
Aula Python/Aula 08 ex3.py
|
ayresmajor/Curso-python
|
006229cec38ea365bf43b19e3ce93fbd32e1dca6
|
[
"MIT"
] | null | null | null |
Aula Python/Aula 08 ex3.py
|
ayresmajor/Curso-python
|
006229cec38ea365bf43b19e3ce93fbd32e1dca6
|
[
"MIT"
] | null | null | null |
Aula Python/Aula 08 ex3.py
|
ayresmajor/Curso-python
|
006229cec38ea365bf43b19e3ce93fbd32e1dca6
|
[
"MIT"
] | null | null | null |
import emoji
# NOTE(review): this prints the `emojize` function object itself
# (e.g. "<function emojize at 0x...>"), not an emojized string; it was
# almost certainly meant to be called with an argument, e.g.
# print(emoji.emojize("Python :thumbs_up:")) — confirm the intended text.
print(emoji.emojize)
| 16.5
| 20
| 0.848485
| 5
| 33
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 33
| 2
| 20
| 16.5
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a55bfa8c321db2b74fa1d8ee007eff64362b8ac7
| 40,245
|
py
|
Python
|
tests/macro_liquidMG_UOZrFe.py
|
niamorelreillet/openiec_with_OC
|
9e027c7052ca98398bf09758bc05b3daf1aba151
|
[
"MIT"
] | null | null | null |
tests/macro_liquidMG_UOZrFe.py
|
niamorelreillet/openiec_with_OC
|
9e027c7052ca98398bf09758bc05b3daf1aba151
|
[
"MIT"
] | null | null | null |
tests/macro_liquidMG_UOZrFe.py
|
niamorelreillet/openiec_with_OC
|
9e027c7052ca98398bf09758bc05b3daf1aba151
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import matplotlib as mpl
from cycler import cycler
import os
from openiec.property.coherentenergy_OC import CoherentGibbsEnergy_OC
from openiec.calculate.calcsigma_OC import SigmaCoherent_OC2
from pyOC import opencalphad as oc
from pyOC import GridMinimizerStatus as gmStat
from scipy.optimize import minimize, Bounds, LinearConstraint, NonlinearConstraint, BFGS
from functools import partial
# Mass density laws as functions of temperature (from Barrachin2004).
constituentDensityLaws = {
    'U1': lambda T: 17270.0 - 1.358 * (T - 1408),
    'ZR1': lambda T: 6844.51 - 0.609898 * T + 2.05008E-4 * T ** 2 - 4.47829E-8 * T ** 3 + 3.26469E-12 * T ** 4,
    'O2U1': lambda T: 8860.0 - 9.285E-1 * (T - 3120),
    'O2ZR1': lambda T: 5150 - 0.445 * (T - 2983),
    'FE1': lambda T: 7030 - 0.88 * (T - 1808),
    'NI1': lambda T: 7900 - 1.19 * (T - 1728),
    'CR1': lambda T: 6290 - 0.72 * (T - 2178),
    # no 'free' oxygen occurs in the considered mixtures, so a meaningless
    # placeholder value is acceptable here
    'O1': lambda T: 1.141,
    # almost no iron oxide occurs in the considered mixtures, so the Fe law
    # is reused for both oxide stoichiometries
    'FE1O1': lambda T: 7030 - 0.88 * (T - 1808),
    'FE1O1_5': lambda T: 7030 - 0.88 * (T - 1808),
}
# Bare element symbols share the law of the matching single-atom constituent.
for _symbol in ('U', 'ZR', 'O', 'FE', 'NI', 'CR'):
    constituentDensityLaws[_symbol] = constituentDensityLaws[_symbol + '1']
def constituentToEndmembersConverter(constituentMolarFractions, constituentsDescription):
    """Convert two-sublattice constituent molar fractions to end-member MASS fractions.

    constituentMolarFractions maps 'sublattice 0' (cations U+4/ZR+4/FE+2) and
    'sublattice 1' (anions VA/O-2/O/FEO3/2) to their site fractions;
    constituentsDescription supplies the molar mass of each constituent.
    Returns a dict of mass fractions over the eight end-members, normalized
    to sum to one.
    """
    cations = constituentMolarFractions['sublattice 0']
    anions = constituentMolarFractions['sublattice 1']
    massU = constituentsDescription['U+4']['mass']
    massZr = constituentsDescription['ZR+4']['mass']
    massFe = constituentsDescription['FE+2']['mass']
    massO = constituentsDescription['O']['mass']
    # (molar fraction, molar mass) for each end-member
    endmembers = {
        'U1': (cations['U+4'] * anions['VA'], massU),
        'O2U1': (cations['U+4'] * anions['O-2'], massU + 2.0 * massO),
        'O1': (anions['O'], massO),
        'ZR1': (cations['ZR+4'] * anions['VA'], massZr),
        'FE1': (cations['FE+2'] * anions['VA'], massFe),
        'O2ZR1': (cations['ZR+4'] * anions['O-2'], massZr + 2.0 * massO),
        'FE1O1': (cations['FE+2'] * anions['O-2'], massFe + 1.0 * massO),
        'FE1O1_5': (anions['FEO3/2'], massFe + 1.5 * massO),
    }
    unnormalized = {name: fraction * molarMass for name, (fraction, molarMass) in endmembers.items()}
    factor = 1.0 / sum(unnormalized.values())
    return {name: value * factor for name, value in unnormalized.items()}
def ComputeEquilibriumWithConstraints(objectfunction, x0, bulkX, method="trust-constr", tol=1e-16):
    """Minimize *objectfunction* over a 3-component molar composition x.

    Constraints:
      * linear: each x[i] is kept in [0, 1] and x[0]+x[1]+x[2] <= 1;
      * nonlinear: the Euclidean distance between x and each of the two bulk
        compositions in *bulkX* must stay >= 1E-6, so the solver cannot fall
        back onto a bulk phase composition.

    *bulkX* is indexed as bulkX[component][phase] with two phases (see the
    callers, which build it from the two LIQUID phase compositions).
    Returns the optimal composition res.x (length-3 numpy array).
    """
    print("********************")
    print("starting point: ", x0)
    print("objective function value at starting point: ", objectfunction(x0))
    print("excluded points (bulk composition): ", bulkX)
    print(bulkX)
    # box constraints on each component plus an upper bound of 1 on the sum
    linearConstraint = LinearConstraint([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]], [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], keep_feasible=True)
    n = len(bulkX)  # NOTE(review): unused
    def cons_f(x):
        # distances from x to each of the two bulk compositions
        f0=np.sqrt((x[0]-bulkX[0][0])**2+(x[1]-bulkX[1][0])**2+(x[2]-bulkX[2][0])**2)
        f1=np.sqrt((x[0]-bulkX[0][1])**2+(x[1]-bulkX[1][1])**2+(x[2]-bulkX[2][1])**2)
        return [f0, f1]
    def cons_J(x):
        # analytical Jacobian of cons_f
        f = cons_f(x)
        return [ [(x[0]-bulkX[0][0])/f[0], (x[1]-bulkX[1][0])/f[0], (x[2]-bulkX[2][0])/f[0]],
                 [(x[0]-bulkX[0][1])/f[1], (x[1]-bulkX[1][1])/f[1], (x[2]-bulkX[2][1])/f[1]] ]
    def cons_H(x, v):
        # analytical Hessians of the two constraints, contracted with the
        # Lagrange multipliers v (trust-constr convention: sum_i v[i]*H_i)
        f = cons_f(x)
        a11 = 1/f[0]-(x[0]-bulkX[0][0])**2/f[0]**3
        a12 = -(x[0]-bulkX[0][0])*(x[1]-bulkX[1][0])/f[0]**3
        a13 = -(x[0]-bulkX[0][0])*(x[2]-bulkX[2][0])/f[0]**3
        a22 = 1/f[0]-(x[1]-bulkX[1][0])**2/f[0]**3
        a23 = -(x[1]-bulkX[1][0])*(x[2]-bulkX[2][0])/f[0]**3
        a33 = 1/f[0]-(x[2]-bulkX[2][0])**2/f[0]**3
        b11 = 1/f[1]-(x[0]-bulkX[0][1])**2/f[1]**3
        b12 = -(x[0]-bulkX[0][1])*(x[1]-bulkX[1][1])/f[1]**3
        b13 = -(x[0]-bulkX[0][1])*(x[2]-bulkX[2][1])/f[1]**3
        b22 = 1/f[1]-(x[1]-bulkX[1][1])**2/f[1]**3
        b23 = -(x[1]-bulkX[1][1])*(x[2]-bulkX[2][1])/f[1]**3
        b33 = 1/f[1]-(x[2]-bulkX[2][1])**2/f[1]**3
        return v[0]*np.array([[a11, a12, a13], [a12, a22, a23], [a13, a23, a33]]) + v[1]*np.array([[b11, b12, b13], [b12, b22, b23], [b13, b23, b33]])
    nonlinearConstraint = NonlinearConstraint(cons_f, 1E-6, np.inf, jac=cons_J, hess=cons_H, keep_feasible=True)
    res = minimize(objectfunction, x0, method=method, constraints=[linearConstraint, nonlinearConstraint],
                   options={'xtol': tol, 'gtol': tol, 'maxiter': 3000, 'initial_constr_penalty': 0.5, 'verbose': 1})
    print(res.x)
    print(res.fun)
    #if (res.fun>1E-2):
    #    raise ValueError('misconvergence!')
    print("********************")
    return res.x
def run():
    """Compute the liquid/liquid interfacial energy of a U-O-Zr-Fe melt over a
    temperature range with the NUCLEA database and write the results to
    'macro_liquidMG_UOZrFe_NUCLEA19_varyingT.csv'.

    For each temperature the global two-liquid equilibrium is computed with
    OpenCalphad; the interfacial composition x and energy sigma are then
    iterated (fixed point on x) until the composition moves by less than
    epsilonX.  Requires the TDBDATA_PRIVATE environment variable and the
    openiec/pyOC project packages.
    """
    print('### test U-O-Zr-Fe coherent interface in the liquid miscibility gap ###\n')
    # tdb filepath
    tdbFile = os.environ['TDBDATA_PRIVATE']+'/NUCLEA-19_1_mod.TDB'
    # tdbFile='tests/TAF_uzrofe_V10.TDB'
    # components
    comps = ['O', 'U', 'ZR', 'FE']
    # phase names
    phasenames = ['LIQUID', 'LIQUID']
    # pressure
    P = 1E5
    # Given initial alloy composition. x0 is the mole fractions of U, Zr, Fe.
    # # RU/Zr=0.60 CZr=0.3 xSteel=0.1
    # x0 = [0.1550142, 0.2583569, 0.1215864]
    # RU/Zr=1.20 CZr=0.50 xSteel=0.10
    x0 = [1.883189e-01, 1.569325e-01, 1.211783e-01]
    # Composition step for searching initial interfacial equilibrium composition.
    #dx = 0.5
    # Convergence criterion for loop on interfacial composition
    epsilonX = 1E-5
    # temperature range
    Tmin = 2900.0
    Tmax = 4200.0
    Trange = np.linspace(Tmin, Tmax, num=11, endpoint=True)
    results = pd.DataFrame(columns=['temperature', 'n_phase1', 'n_phase2', 'xU_phase1', 'xU_phase2','xZr_phase1', 'xZr_phase2', 'xFe_phase1', 'xFe_phase2','xU_interface','xZr_interface','xFe_interface', 'sigma','VmU','VmZr','VmFe'])
    x=None
    for T in Trange:
        # calculate global equilibrium and retrieve associated chemical potentials
        CoherentGibbsEnergy_OC.initOC(tdbFile, comps)
        oc.raw().pytqtgsw(4) # no merging of grid points
        #oc.raw().pytqtgsw(23) # denser grid
        model = CoherentGibbsEnergy_OC(T, 1E5, phasenames)
        mueq = model.chemicalpotential(x0)
        phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
        phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
        if (len(phasesAtEquilibriumMolarAmounts)==1):
            # it is possible that the miscibility gap has not been detected correctly (can happen when T increases)
            #print(phasesAtEquilibriumMolarAmounts)
            # ad hoc strategy: 1) calculate an equilibrium at lower temperature (hopefully finding the two phases)
            #                  2) redo the calculation at the target temperature afterwards without the grid minimizer
            model = CoherentGibbsEnergy_OC(Tmin, 1E5, phasenames)
            mueq = model.chemicalpotential(x0)
            phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
            phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
            #print(phasesAtEquilibriumMolarAmounts)
            oc.setTemperature(T)
            oc.calculateEquilibrium(gmStat.Off)
            mueq = model.getChemicalPotentials()
            phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
            phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
        phasesAtEquilibriumElementCompositions = phasesAtEquilibrium.getPhaseElementComposition()
        print(phasesAtEquilibriumElementCompositions)
        if (set(phasesAtEquilibriumMolarAmounts)==set(['LIQUID#1', 'LIQUID_AUTO#2'])):
            # Composition range for searching initial interfacial equilibrium composition
            # calculated from the actual phase compositions
            componentsWithLimits = comps[1:]
            #limit = [ [1.0, 0.0] for each in componentsWithLimits ]
            #for phase in phasesAtEquilibriumElementCompositions:
            #    for element in phasesAtEquilibriumElementCompositions[phase]:
            #        elementMolarFraction = phasesAtEquilibriumElementCompositions[phase][element]
            #        if element in componentsWithLimits:
            #            limit[componentsWithLimits.index(element)][0] = min(limit[componentsWithLimits.index(element)][0], elementMolarFraction)
            #            limit[componentsWithLimits.index(element)][1] = max(limit[componentsWithLimits.index(element)][1], elementMolarFraction)
            #limit = [ [each[0]+dx*(each[1]-each[0]), each[1]-dx*(each[1]-each[0])] for each in limit ]
            # bulkX[component][phase]: bulk compositions excluded by the constrained minimizer
            bulkX = [ [ phasesAtEquilibriumElementCompositions[phase][element] for phase in phasesAtEquilibriumMolarAmounts ] for element in componentsWithLimits ]
            notConverged = True
            # initial guess (first temperature only): midpoint of the two bulk compositions
            if (x==None):  # NOTE(review): idiomatic form is `x is None`
                x = [ 0.5*(phasesAtEquilibriumElementCompositions['LIQUID#1'][comp] + phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2'][comp]) for comp in componentsWithLimits ]
            # Iterate on interfacial molar composition
            while (notConverged):
                # Molar volumes of pure components evaluated at x
                CoherentGibbsEnergy_OC.initOC(tdbFile, comps)
                model = CoherentGibbsEnergy_OC(T, P, phasenames[0], False)
                if ('TAF' in tdbFile):
                    functions=model.constantPartialMolarVolumeFunctions(x, constituentDensityLaws, 1E-5, constituentToEndmembersConverter)
                else:
                    functions=model.constantPartialMolarVolumeFunctions(x, constituentDensityLaws, 1E-5)
                # calculate interfacial energy
                sigma = SigmaCoherent_OC2(
                    T=T,
                    x0=x0,
                    db=tdbFile,
                    comps=comps,
                    phasenames=phasenames,
                    purevms=functions,
                    guess=x,
                    computeEquilibriumFunction=partial(ComputeEquilibriumWithConstraints, bulkX=bulkX),
                    enforceGridMinimizerForLocalEq=False,
                    mueq=mueq
                )
                print('at T=', T, ' sigma=', sigma.Interfacial_Energy.values, '\n')
                notConverged = np.linalg.norm(x[:]-sigma.Interfacial_Composition.values[1:], np.inf)>epsilonX
                print('convergence: ', not notConverged, x[:], sigma.Interfacial_Composition.values[1:])
                x[:]=sigma.Interfacial_Composition.values[1:]
            # store results in pandas dataframe
            if (np.abs(sigma.Interfacial_Energy.values)>1E-6):
                print(sigma, "\n")
                # all partial interfacial energies must agree at equilibrium
                if (abs(np.max(sigma.Partial_Interfacial_Energy.values)-np.min(sigma.Partial_Interfacial_Energy.values))>1E-3):
                    raise ValueError('wrong value discarded')
                # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
                # pd.concat is the forward-compatible replacement
                results = results.append({'temperature' : T,
                                          'n_phase1' : phasesAtEquilibriumMolarAmounts['LIQUID#1'],
                                          'n_phase2' : phasesAtEquilibriumMolarAmounts['LIQUID_AUTO#2'],
                                          'xU_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['U'],
                                          'xU_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['U'],
                                          'xZr_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['ZR'],
                                          'xZr_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['ZR'],
                                          'xFe_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['FE'],
                                          'xFe_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['FE'],
                                          'xU_interface' : sigma.Interfacial_Composition.values[1],
                                          'xZr_interface' : sigma.Interfacial_Composition.values[2],
                                          'xFe_interface' : sigma.Interfacial_Composition.values[3],
                                          'sigma' : sigma.Interfacial_Energy.values,
                                          'VmU' : functions[1](T),
                                          'VmZr' : functions[2](T),
                                          'VmFe' : functions[3](T),
                                          'VmO' : functions[0](T),
                                          },
                                         ignore_index = True)
            else:
                print(sigma, "\n")
                raise ValueError('wrong value discarded')
        else:
            print('at T=', T, ' out of the miscibility gap')
            print('phases at equilibrium:', phasesAtEquilibriumMolarAmounts)
    # write csv result file
    results.to_csv('macro_liquidMG_UOZrFe_NUCLEA19_varyingT.csv')
def run2():
    """Recompute the liquid/liquid interfacial energies of the U-O-Zr-Fe melt
    with the TAF-ID database, reusing the constant molar volumes stored in
    'macro_liquidMG_UOZrFe_run.csv' by a previous run, and write the results
    to 'macro_liquidMG_UOZrFe_TAFID_varyingT.csv'.

    Unlike run(), the molar volumes are not recomputed from density laws, so
    there is no inner fixed-point loop on the interfacial composition.
    Requires the openiec/pyOC project packages.
    """
    print('### test U-O coherent interface in the liquid miscibility gap ###\n')
    # tdb filepath
    #tdbFile=os.environ['TDBDATA_PRIVATE']+'/feouzr.tdb'
    #tdbFile=os.environ['TDBDATA_PRIVATE']+'/NUCLEA-17_1_mod.TDB'
    #tdbFile=os.environ['TDBDATA_PRIVATE']+'/NUCLEA-19_1_mod.TDB'
    tdbFile='tests/TAF_uzrofe_V10.TDB'
    # components
    comps = ['O', 'U', 'ZR', 'FE']
    # NOTE: the local duplicate of constituentDensityLaws (and the unused
    # P / epsilonX constants) were removed: molar volumes are read from the
    # input csv here, so the density laws are never used in this function.
    # phase names
    phasenames = ['LIQUID', 'LIQUID']
    # Given initial alloy composition. x0 is the mole fractions of U, Zr, Fe.
    # # RU/Zr=0.60 CZr=0.3 xSteel=0.1
    # x0 = [0.1550142, 0.2583569, 0.1215864]
    # RU/Zr=1.20 CZr=0.50 xSteel=0.10
    x0 = [1.883189e-01, 1.569325e-01, 1.211783e-01]
    # temperatures and precomputed molar volumes from a previous run()
    inputs = pd.read_csv('macro_liquidMG_UOZrFe_run.csv')
    results = pd.DataFrame(columns=['temperature', 'n_phase1', 'n_phase2', 'xU_phase1', 'xU_phase2','xZr_phase1', 'xZr_phase2', 'xFe_phase1', 'xFe_phase2','xU_interface','xZr_interface','xFe_interface', 'VmU', 'VmZr','VmFe','sigma'])
    x = None
    for i,T in enumerate(inputs['temperature']):
        # calculate global equilibrium and retrieve associated chemical potentials
        CoherentGibbsEnergy_OC.initOC(tdbFile, comps)
        oc.raw().pytqtgsw(4) # no merging of grid points
        #oc.raw().pytqtgsw(23) # denser grid
        model = CoherentGibbsEnergy_OC(T, 1E5, phasenames)
        mueq = model.chemicalpotential(x0)
        phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
        phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
        if (len(phasesAtEquilibriumMolarAmounts)==1):
            # it is possible that the miscibility gap has not been detected correctly (can happen when T increases)
            # ad hoc strategy: 1) calculate an equilibrium at lower temperature (hopefully finding the two phases)
            #                  2) redo the calculation at the target temperature afterwards without the grid minimizer
            model = CoherentGibbsEnergy_OC(2800.0, 1E5, phasenames)
            mueq = model.chemicalpotential(x0)
            phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
            phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
            oc.setTemperature(T)
            oc.calculateEquilibrium(gmStat.Off)
            mueq = model.getChemicalPotentials()
            phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
            phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
        phasesAtEquilibriumElementCompositions = phasesAtEquilibrium.getPhaseElementComposition()
        print(phasesAtEquilibriumMolarAmounts)
        print(phasesAtEquilibriumElementCompositions)
        if (set(phasesAtEquilibriumMolarAmounts)==set(['LIQUID#1', 'LIQUID_AUTO#2'])):
            # interfacial composition search is restricted to U, Zr, Fe
            componentsWithLimits = comps[1:]
            # bulkX[component][phase]: bulk compositions excluded by the constrained minimizer
            bulkX = [ [ phasesAtEquilibriumElementCompositions[phase][element] for phase in phasesAtEquilibriumMolarAmounts ] for element in componentsWithLimits ]
            # initial guess (first temperature only): midpoint of the two bulk compositions
            if (x is None):  # BUG FIX: was `x == None`
                x = [ 0.5*(phasesAtEquilibriumElementCompositions['LIQUID#1'][comp] + phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2'][comp]) for comp in componentsWithLimits ]
            #x = x0.copy()
            # Constant molar volumes of the pure components, read from the input csv
            # in the same O, U, Zr, Fe order as run() builds them.
            # BUG FIX: the row values are bound through default arguments so each
            # lambda captures the current row i instead of late-binding the loop variable.
            functions = [ lambda _, v=inputs['VmO'][i]: v, lambda _, v=inputs['VmU'][i]: v, lambda _, v=inputs['VmZr'][i]: v, lambda _, v=inputs['VmFe'][i]: v ]
            # calculate interfacial energy
            sigma = SigmaCoherent_OC2(
                T=T,
                x0=x0,
                db=tdbFile,
                comps=comps,
                phasenames=phasenames,
                purevms=functions,
                guess=x,
                computeEquilibriumFunction=partial(ComputeEquilibriumWithConstraints, bulkX=bulkX),
                enforceGridMinimizerForLocalEq=False,
                mueq=mueq
            )
            print('at T=', T, ' sigma=', sigma.Interfacial_Energy.values, '\n')
            x[:]=sigma.Interfacial_Composition.values[1:]
            # Store result
            if (np.abs(sigma.Interfacial_Energy.values)>1E-6):
                # store results in pandas dataframe
                print(sigma, "\n")
                results = results.append({'temperature' : T,
                                          'n_phase1' : phasesAtEquilibriumMolarAmounts['LIQUID#1'],
                                          'n_phase2' : phasesAtEquilibriumMolarAmounts['LIQUID_AUTO#2'],
                                          'xU_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['U'],
                                          'xU_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['U'],
                                          'xZr_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['ZR'],
                                          'xZr_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['ZR'],
                                          'xFe_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['FE'],
                                          'xFe_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['FE'],
                                          'xU_interface' : sigma.Interfacial_Composition.values[1],
                                          'xZr_interface' : sigma.Interfacial_Composition.values[2],
                                          'xFe_interface' : sigma.Interfacial_Composition.values[3],
                                          'sigma' : sigma.Interfacial_Energy.values,
                                          # BUG FIX: molar volumes were recorded shifted by one
                                          # ('VmU' stored functions[0], the O volume, etc.);
                                          # functions is ordered [VmO, VmU, VmZr, VmFe].
                                          'VmU' : functions[1](T),
                                          'VmZr' : functions[2](T),
                                          'VmFe' : functions[3](T),
                                          'VmO' : functions[0](T),
                                          },
                                         ignore_index = True)
            else:
                print(sigma, "\n")
                raise ValueError('wrong value discarded')
        else:
            print('at T=', T, ' out of the miscibility gap')
            print('phases at equilibrium:', phasesAtEquilibriumMolarAmounts)
    # write csv result file
    results.to_csv('macro_liquidMG_UOZrFe_TAFID_varyingT.csv')
def run3(tdbFile, RUZr):
    """Compute liquid/liquid interfacial energies at fixed T=3000 K for a set
    of U-O-Zr-Fe alloy compositions read from 'tests/{RUZr}RUZr.csv', using
    the given *tdbFile* database.

    Same fixed-point iteration on the interfacial composition as run(); the
    output csv name depends on whether *tdbFile* is a TAF-ID or NUCLEA
    database.  Requires the openiec/pyOC project packages.

    Args:
        tdbFile: path to the thermodynamic database (.TDB).
        RUZr: U/Zr molar ratio used to select the input composition file.
    """
    print('### test U-O-Zr-Fe coherent interface in the liquid miscibility gap ###\n')
    # components
    comps = ['O', 'U', 'ZR', 'FE']
    # phase names
    phasenames = ['LIQUID', 'LIQUID']
    # pressure
    P = 1E5
    # initial alloy compositions. x0 is the mole fractions of U, Zr, Fe.
    read = pd.read_csv('tests/{0:2.1f}RUZr.csv'.format(RUZr), delim_whitespace=True)
    # Composition step for searching initial interfacial equilibrium composition.
    #dx = 0.5
    # Convergence criterion for loop on interfacial composition
    epsilonX = 1E-4
    # temperature range
    T = 3000
    # Trange = np.linspace(Tmin, Tmax, num=10, endpoint=True)
    results = pd.DataFrame(columns=['temperature', 'n_phase1', 'n_phase2', 'xU_phase1', 'xU_phase2','xZr_phase1', 'xZr_phase2', 'xFe_phase1', 'xFe_phase2','xU_interface','xZr_interface','xFe_interface', 'sigma','VmU','VmZr','VmFe'])
    x = None
    for ii in range(read.shape[0]):
        x0=[read['xU'][ii],read['xZr'][ii],read['xFe'][ii]]
        print("*********({0:d}/{1:d})*********".format(ii+1, read.shape[0]))
        print("x0: ",x0)
        # calculate global equilibrium and retrieve associated chemical potentials
        CoherentGibbsEnergy_OC.initOC(tdbFile, comps)
        oc.raw().pytqtgsw(4) # no merging of grid points
        #oc.raw().pytqtgsw(23) # denser grid
        model = CoherentGibbsEnergy_OC(T, 1E5, phasenames)
        mueq = model.chemicalpotential(x0)
        phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
        phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
        if (len(phasesAtEquilibriumMolarAmounts)==1):
            # it is possible that the miscibility gap has not been detected correctly (can happen when T increases)
            #print(phasesAtEquilibriumMolarAmounts)
            # ad hoc strategy: 1) calculate an equilibrium at lower temperature (hopefully finding the two phases)
            #                  2) redo the calculation at the target temperature afterwards without the grid minimizer
            model = CoherentGibbsEnergy_OC(2900, 1E5, phasenames)
            mueq = model.chemicalpotential(x0)
            phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
            phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
            #print(phasesAtEquilibriumMolarAmounts)
            oc.setTemperature(T)
            oc.calculateEquilibrium(gmStat.Off)
            mueq = model.getChemicalPotentials()
            phasesAtEquilibrium = oc.getPhasesAtEquilibrium()
            phasesAtEquilibriumMolarAmounts = phasesAtEquilibrium.getPhaseMolarAmounts()
        phasesAtEquilibriumElementCompositions = phasesAtEquilibrium.getPhaseElementComposition()
        print(phasesAtEquilibriumMolarAmounts)
        if (set(phasesAtEquilibriumMolarAmounts)==set(['LIQUID#1', 'LIQUID_AUTO#2'])):
            # Composition range for searching initial interfacial equilibrium composition
            # calculated from the actual phase compositions
            componentsWithLimits = comps[1:]
            #limit = [ [1.0, 0.0] for each in componentsWithLimits ]
            #for phase in phasesAtEquilibriumElementCompositions:
            #for element in phasesAtEquilibriumElementCompositions[phase]:
            #        elementMolarFraction = phasesAtEquilibriumElementCompositions[phase][element]
            #        if element in componentsWithLimits:
            #            limit[componentsWithLimits.index(element)][0] = min(limit[componentsWithLimits.index(element)][0], elementMolarFraction)
            #            limit[componentsWithLimits.index(element)][1] = max(limit[componentsWithLimits.index(element)][1], elementMolarFraction)
            #limit = [ [each[0]+dx*(each[1]-each[0]), each[1]-dx*(each[1]-each[0])] for each in limit ]
            # bulkX[component][phase]: bulk compositions excluded by the constrained minimizer
            bulkX = [ [ phasesAtEquilibriumElementCompositions[phase][element] for phase in phasesAtEquilibriumMolarAmounts ] for element in componentsWithLimits ]
            notConverged = True
            # initial guess (first composition only): midpoint of the two bulk compositions
            if (x==None):  # NOTE(review): idiomatic form is `x is None`
                x = [ 0.5*(phasesAtEquilibriumElementCompositions['LIQUID#1'][comp] + phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2'][comp]) for comp in componentsWithLimits ]
            # Iterate on interfacial molar composition
            while (notConverged):
                # Molar volumes of pure components evaluated at x
                CoherentGibbsEnergy_OC.initOC(tdbFile, comps)
                model = CoherentGibbsEnergy_OC(T, P, phasenames[0], False)
                if ('TAF' in tdbFile):
                    functions=model.constantPartialMolarVolumeFunctions(x, constituentDensityLaws, 1E-5, constituentToEndmembersConverter)
                else:
                    functions=model.constantPartialMolarVolumeFunctions(x, constituentDensityLaws, 1E-5)
                # calculate interfacial energy
                sigma = SigmaCoherent_OC2(
                    T=T,
                    x0=x0,
                    db=tdbFile,
                    comps=comps,
                    phasenames=phasenames,
                    purevms=functions,
                    guess=x,
                    computeEquilibriumFunction=partial(ComputeEquilibriumWithConstraints, bulkX=bulkX),
                    enforceGridMinimizerForLocalEq=False,
                    mueq=mueq
                )
                print('at T=', T, ' sigma=', sigma.Interfacial_Energy.values, '\n')
                notConverged = np.linalg.norm(x[:]-sigma.Interfacial_Composition.values[1:], np.inf)>epsilonX
                print('convergence: ', not notConverged, x[:], sigma.Interfacial_Composition.values[1:])
                x[:]=sigma.Interfacial_Composition.values[1:]
            # store results in pandas dataframe
            if (np.abs(sigma.Interfacial_Energy.values)>1E-5):
                print(sigma, "\n")
                # all partial interfacial energies must agree at equilibrium
                if (abs(np.max(sigma.Partial_Interfacial_Energy.values)-np.min(sigma.Partial_Interfacial_Energy.values))>1E-3):
                    print(np.min(sigma.Partial_Interfacial_Energy.values))
                    print(np.max(sigma.Partial_Interfacial_Energy.values))
                    raise ValueError('wrong value discarded')
                # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
                # pd.concat is the forward-compatible replacement
                results = results.append({'temperature' : T,
                                          'n_phase1' : phasesAtEquilibriumMolarAmounts['LIQUID#1'],
                                          'n_phase2' : phasesAtEquilibriumMolarAmounts['LIQUID_AUTO#2'],
                                          'xU_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['U'],
                                          'xU_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['U'],
                                          'xZr_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['ZR'],
                                          'xZr_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['ZR'],
                                          'xFe_phase1' : phasesAtEquilibriumElementCompositions['LIQUID#1']['FE'],
                                          'xFe_phase2' : phasesAtEquilibriumElementCompositions['LIQUID_AUTO#2']['FE'],
                                          'xU_interface' : sigma.Interfacial_Composition.values[1],
                                          'xZr_interface' : sigma.Interfacial_Composition.values[2],
                                          'xFe_interface' : sigma.Interfacial_Composition.values[3],
                                          'sigma' : sigma.Interfacial_Energy.values,
                                          'VmU' : functions[1](T),
                                          'VmZr' : functions[2](T),
                                          'VmFe' : functions[3](T),
                                          'VmO' : functions[0](T),
                                          },
                                         ignore_index = True)
            else:
                raise ValueError('wrong value discarded')
        else:
            print('at T=', T, ' out of the miscibility gap')
            print('phases at equilibrium:', phasesAtEquilibriumMolarAmounts)
    # write csv result file
    if ('TAF' in tdbFile):
        results.to_csv('macro_liquidMG_UOZrFe_TAFID_RUZR={0:2.1f}.csv'.format(RUZr))
    else:
        results.to_csv('macro_liquidMG_UOZrFe_NUCLEA19_RUZR={0:2.1f}.csv'.format(RUZr))
def fit():
    """Fit a power law sigma0*(1-T/Tc)^mu to the sigma(T) results produced by
    run(), and plot the fit alongside sigma versus the interfacial molar
    fractions of U, Zr and Fe.

    Reads 'macro_liquidMG_UOZrFe_NUCLEA_varyingT.csv'; saves the figure to
    'macro_liquidMG_UOZrFe_fit.pdf' and displays it.
    """
    results = pd.read_csv('macro_liquidMG_UOZrFe_NUCLEA_varyingT.csv')
    # Power-law models with constants sigma0, Tc, mu (and optional additive sigmaC)
    def power_law_plus_const(T, sigma0, Tc, mu, sigmaC):
        return sigma0*np.power(1.0-T/Tc, mu)+sigmaC
    def power_law_no_const(T, sigma0, Tc, mu):
        return sigma0*np.power(1.0-T/Tc, mu)
    # Fit the power-law data (the constant-free variant is used)
    power_law = power_law_no_const
    print(results['temperature'])
    print(results['sigma'])
    # initial guess: sigma0=0.7, Tc=last temperature in the data, mu=1.9
    # (robustness fix: .iloc[-1] instead of [len(...) - 1], which relied on
    # the default RangeIndex of the csv)
    pars, cov = curve_fit(f=power_law, xdata=results['temperature'], ydata=results['sigma'], p0=[0.7, results['temperature'].iloc[-1], 1.9], bounds=(-np.inf, np.inf))
    # Standard deviations of the parameters (square roots of the diagonal of the covariance)
    stdevs = np.sqrt(np.diag(cov))
    # Residuals of the fit (computed for inspection; not plotted)
    print(power_law(results['temperature'], *pars))
    res = results['sigma'] - power_law(results['temperature'], *pars)
    print(pars, stdevs)
    plt.rcParams['figure.figsize'] = (12,7)
    fig,axes=plt.subplots(2,2,constrained_layout=True)
    # Plots associated with interfacial energy.
    # BUG FIX: LaTeX labels are now raw strings; '\s' is an invalid escape
    # sequence in a normal string literal (SyntaxWarning on recent Pythons).
    ax = axes[0,0]
    ax.grid(True)
    ax.plot(results['temperature'], results['sigma'], marker = 'o', ls='', color='tab:cyan', label=r'calculated values: $\sigma_{calculated}$')
    legLabel = r'fit: $\sigma_{fit}='+'{0:4.3f} (1-T/{1:4.1f})^'.format(pars[0], pars[1])+'{'+'{0:4.3f}'.format(pars[2])+'}$'
    ax.plot(results['temperature'], power_law(results['temperature'], *pars), linestyle='--', linewidth=2, color='black', label=legLabel)
    ax.set_xlabel('temperature T (K)',fontsize=12)
    ax.set_ylabel(r'interfacial energy $\sigma$ (N.m$^{-1}$)',fontsize=12)
    ax.legend(loc='upper right')
    # Plots associated with composition
    ax = axes[0,1]
    ax.grid(True)
    ax.plot(results['xU_interface'], results['sigma'], marker = 'o', ls='--', color='tab:cyan')
    ax.set_ylabel(r'interfacial energy $\sigma$ (N.m$^{-1}$)',fontsize=12)
    ax.set_xlabel('interface U molar fraction',fontsize=12)
    ax = axes[1,0]
    ax.grid(True)
    ax.plot(results['xZr_interface'], results['sigma'], marker = 'o', ls='--', color='tab:cyan')
    ax.set_ylabel(r'interfacial energy $\sigma$ (N.m$^{-1}$)',fontsize=12)
    ax.set_xlabel('interface Zr molar fraction',fontsize=12)
    ax = axes[1,1]
    ax.grid(True)
    ax.plot(results['xFe_interface'], results['sigma'], marker = 'o', ls='--', color='tab:cyan')
    ax.set_ylabel(r'interfacial energy $\sigma$ (N.m$^{-1}$)',fontsize=12)
    ax.set_xlabel('interface Fe molar fraction',fontsize=12)
    plt.savefig('macro_liquidMG_UOZrFe_fit.pdf')
    plt.show()
def plot(tdbFile, RUZr):
    """Plot interfacial energy and interface compositions versus steel fraction.

    Loads the inputs for the given U/Zr ratio from ``tests/{RUZr}RUZr.csv``
    and the matching results file (TAFID when *tdbFile* contains 'TAF',
    NUCLEA19 otherwise), then draws a 2x2 figure with one curve family per
    distinct C_Zr value and saves it as both PDF and PNG.

    Parameters
    ----------
    tdbFile : str
        Thermodynamic database path; only inspected for the substring 'TAF'
        to pick the results file and the output figure name.
    RUZr : float
        U/Zr ratio used to build the input/result file names.
    """
    inputs = pd.read_csv('tests/{0:2.1f}RUZr.csv'.format(RUZr), delim_whitespace=True)
    CZr=inputs['CZr']
    xSteel=inputs['xSteel']
    # write csv result file
    if ('TAF' in tdbFile):
        results = pd.read_csv('macro_liquidMG_UOZrFe_TAFID_RUZR={0:2.1f}.csv'.format(RUZr))
    else:
        results = pd.read_csv('macro_liquidMG_UOZrFe_NUCLEA19_RUZR={0:2.1f}.csv'.format(RUZr))
    #
    # Tolerance used to decide when two input values count as "equal".
    epsilon=1E-4
    def calculateSet(array, tol):
        # Return the distinct values of `array`: values within `tol` of the
        # previously kept (sorted, ascending) representative are collapsed.
        sortedArray = array.copy()
        sortedArray.sort()
        results = [sortedArray.pop(0), ]
        for value in sortedArray:
            if abs(results[-1] - value) <= tol:
                continue
            results.append(value)
        return results
    #
    # Property cycles: each C_Zr family gets its own color/marker; on the
    # composition panels each curve kind additionally gets its own linestyle.
    colors=['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:cyan', 'tab:brown', 'tab:pink']
    markers=['x', '+', 'o', '*', '^', 'v', '<', '>']
    prop_cycle = cycler(color=colors) + cycler(marker=markers) + cycler(markevery=[0.1]*len(markers))
    prop_cycle2 = prop_cycle * cycler(linestyle=['-', '--', '-.'])
    #
    ftSize = 10
    plt.rcParams["figure.figsize"] = (12,10)
    plt.rcParams["legend.fontsize"] = ftSize
    CZr_set = calculateSet(inputs['CZr'].tolist(), epsilon)
    x_Fe_set = calculateSet(inputs['xSteel'].tolist(), epsilon)
    #
    fig,axes=plt.subplots(2,2,constrained_layout=True)
    ax1 = axes[0,0]  # interfacial energy
    ax1.grid(True)
    ax1.set_prop_cycle(prop_cycle)
    ax2 = axes[0,1]  # U molar fractions
    ax2.grid(True)
    ax2.set_prop_cycle(prop_cycle2)
    ax3 = axes[1,0]  # Zr molar fractions
    ax3.grid(True)
    ax3.set_prop_cycle(prop_cycle2)
    ax4 = axes[1,1]  # Fe molar fractions
    ax4.grid(True)
    ax4.set_prop_cycle(prop_cycle2)
    for valCZr in CZr_set:
        # Row positions of the input/result tables belonging to this C_Zr.
        indI = [i for i, val in enumerate(CZr) if abs(val - valCZr) < epsilon]
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)
        csf = ax1.plot(xSteel[indI], results['sigma'][indI], label=legLabel)
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - interfacial liquid"
        csf = ax2.plot(xSteel[indI], results['xU_interface'][indI], label=legLabel)
        # NOTE(review): `xmin` holds the per-row max of the two bulk phases
        # and `xmax` the per-row min — the names look swapped; verify.
        xmin = [max(results['xU_phase1'][i], results['xU_phase2'][i]) for i in indI]
        xmax = [min(results['xU_phase1'][i], results['xU_phase2'][i]) for i in indI]
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - bulk metal"
        csf = ax2.plot(xSteel[indI], xmin, label=legLabel)
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - bulk oxide"
        csf = ax2.plot(xSteel[indI], xmax, label=legLabel)
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - interfacial liquid"
        csf = ax3.plot(xSteel[indI], results['xZr_interface'][indI], label=legLabel)
        xmin = [max(results['xZr_phase1'][i], results['xZr_phase2'][i]) for i in indI]
        xmax = [min(results['xZr_phase1'][i], results['xZr_phase2'][i]) for i in indI]
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - bulk metal"
        csf = ax3.plot(xSteel[indI], xmin, label=legLabel)
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - bulk oxide"
        csf = ax3.plot(xSteel[indI], xmax, label=legLabel)
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - interfacial liquid"
        csf = ax4.plot(xSteel[indI], results['xFe_interface'][indI], label=legLabel)
        xmin = [max(results['xFe_phase1'][i], results['xFe_phase2'][i]) for i in indI]
        xmax = [min(results['xFe_phase1'][i], results['xFe_phase2'][i]) for i in indI]
        # NOTE(review): for Fe the metal/oxide curves use xmax/xmin in the
        # opposite order to the U and Zr panels — confirm this is intended.
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - bulk metal"
        csf = ax4.plot(xSteel[indI], xmax, label=legLabel)
        legLabel="$C_{Zr}"+"={0:3.2f}$".format(valCZr)+" - bulk oxide"
        csf = ax4.plot(xSteel[indI], xmin, label=legLabel)
    ax1.set_xlabel("$x_{steel}$", fontsize=ftSize)
    ax1.set_ylabel("interfacial energy $\sigma$ (N.m$^{-1}$)", fontsize=ftSize)
    ax1.set_title("$R_{U/Zr}"+"={0:2.1f}$".format(RUZr), fontsize=ftSize)
    #ax1.legend(loc="best", ncol=2)
    ax2.set_xlabel("$x_{steel}$", fontsize=ftSize)
    ax2.set_ylabel("U molar fraction", fontsize=ftSize)
    ax2.set_title("$R_{U/Zr}"+"={0:2.1f}$".format(RUZr), fontsize=ftSize)
    #ax2.legend(loc="best", ncol=2)
    ax3.set_xlabel("$x_{steel}$", fontsize=ftSize)
    ax3.set_ylabel("Zr molar fraction", fontsize=ftSize)
    ax3.set_title("$R_{U/Zr}"+"={0:2.1f}$".format(RUZr), fontsize=ftSize)
    #ax3.legend(loc="best", ncol=2)
    ax4.set_xlabel("$x_{steel}$", fontsize=ftSize)
    ax4.set_ylabel("Fe molar fraction", fontsize=ftSize)
    ax4.set_title("$R_{U/Zr}"+"={0:2.1f}$".format(RUZr), fontsize=ftSize)
    #ax4.legend(loc="best", ncol=2)
    # One shared legend built from the handles of the last axes.
    lines, labels = fig.axes[-1].get_legend_handles_labels()
    fig.legend(lines, labels, loc = 'center')
    if ('TAF' in tdbFile):
        figName = 'macro_liquidMG_UOZrFe_TAFID_RUZR={0:2.1f}_plot'.format(RUZr)
    else:
        figName = 'macro_liquidMG_UOZrFe_NUCLEA19_RUZR={0:2.1f}_plot'.format(RUZr)
    plt.savefig(figName+'.pdf')
    plt.savefig(figName+'.png')
    plt.show()
if __name__ == '__main__':
    # Script entry point: pick the thermodynamic database and the U/Zr
    # ratio, then render the result plots.  The other drivers (run, run2,
    # run3, fit) and the alternative databases (feouzr.tdb,
    # tests/TAF_uzrofe_V10.TDB) were toggled here by (un)commenting.
    database_path = os.environ['TDBDATA_PRIVATE'] + '/NUCLEA-19_1_mod.TDB'
    ratio_u_zr = 1.2
    plot(database_path, ratio_u_zr)
| 60.156951
| 233
| 0.5919
| 4,294
| 40,245
| 5.466931
| 0.11714
| 0.002812
| 0.002556
| 0.022492
| 0.8118
| 0.78803
| 0.770096
| 0.746241
| 0.715953
| 0.699936
| 0
| 0.045172
| 0.272257
| 40,245
| 668
| 234
| 60.247006
| 0.756351
| 0.169462
| 0
| 0.559921
| 0
| 0
| 0.139217
| 0.01818
| 0.021611
| 0
| 0
| 0
| 0
| 1
| 0.02554
| false
| 0
| 0.02554
| 0.003929
| 0.066798
| 0.076621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a57f29bd0b22567e1079a65486b55435f9aed6e1
| 86
|
py
|
Python
|
voltagemetricspublisher/core/version.py
|
SumudithaR/svc.voltage-metrics-publisher
|
4e0418c855920d3e984acf097681e2fc8c8ec081
|
[
"Apache-2.0"
] | null | null | null |
voltagemetricspublisher/core/version.py
|
SumudithaR/svc.voltage-metrics-publisher
|
4e0418c855920d3e984acf097681e2fc8c8ec081
|
[
"Apache-2.0"
] | null | null | null |
voltagemetricspublisher/core/version.py
|
SumudithaR/svc.voltage-metrics-publisher
|
4e0418c855920d3e984acf097681e2fc8c8ec081
|
[
"Apache-2.0"
] | null | null | null |
# Package version as a 5-tuple: (major, minor, micro, release-level, serial).
VERSION = (0, 0, 1, 'alpha', 0)


def get_version(version=VERSION):
    """Return the given version tuple, defaulting to this package's VERSION.

    Bug fix: the *version* argument was previously ignored — the function
    always returned the module-level constant regardless of what the caller
    passed in.  Calling it with no argument behaves exactly as before.
    """
    return version
| 17.2
| 33
| 0.662791
| 13
| 86
| 4.307692
| 0.538462
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.186047
| 86
| 4
| 34
| 21.5
| 0.742857
| 0
| 0
| 0
| 0
| 0
| 0.05814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3c2db8a51625215900fa135312a2a8971e721b83
| 6,736
|
py
|
Python
|
src/haizea/pluggable/accounting/models.py
|
Hamdy/haizea
|
797e1b0ae19b41887c8970298de3adb9498034f3
|
[
"Apache-2.0"
] | 1
|
2017-10-31T22:17:31.000Z
|
2017-10-31T22:17:31.000Z
|
src/haizea/pluggable/accounting/models.py
|
Hamdy/haizea
|
797e1b0ae19b41887c8970298de3adb9498034f3
|
[
"Apache-2.0"
] | null | null | null |
src/haizea/pluggable/accounting/models.py
|
Hamdy/haizea
|
797e1b0ae19b41887c8970298de3adb9498034f3
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, Float
from sqlalchemy.orm import relationship, backref
# Shared declarative base for all accounting ORM models defined below.
Base = declarative_base()
class Experiment(Base):
    """One accounting experiment with its aggregate lease counters."""

    __tablename__ = 'experiments'

    def __init__(self):
        # NOTE(review): overriding the declarative constructor means no
        # column values can be passed as keywords to Experiment(...);
        # only the description is initialised here.
        self.description = ""

    id = Column(Integer, primary_key=True)
    description = Column(String)
    total_accepted_ar = Column(Integer)
    total_rejected_ar = Column(Integer)
    total_accepted_im = Column(Integer)
    total_rejected_im = Column(Integer)
    total_completed_be = Column(Integer)
    be_completed_after = Column(Float)

    def __repr__(self):
        # NOTE(review): description defaults to "" only via __init__;
        # rows loaded from the DB may carry None here.
        return self.description
class CPU(Base):
    """Overall CPU utilization sample recorded for an experiment."""

    __tablename__ = 'cpu_utilizations'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    # Deleting an Experiment cascades to its utilization rows.
    experiment = relationship("Experiment", backref=backref('cpu_utilizations', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(String)
    node = Column(String)
    value = Column(String)
    avg = Column(String)

    def __repr__(self):
        return "Cpu Utilization for experiment %s" % self.experiment_id
class CPUPnode(Base):
    """CPU load sample for a single physical node of an experiment."""

    __tablename__ = 'cpu_pnode_load'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('pnodes_cpu_load', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(String)
    node = Column(String)
    value = Column(Float)

    def __repr__(self):
        return "Cpu Utilization for single physical node %s in experiment %s " % (self.node, self.experiment_id)
class DiskPnode(Base):
    """Disk load sample for a single physical node of an experiment."""

    __tablename__ = 'disk_pnode_load'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('pnodes_disk_load', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(String)
    node = Column(String)
    value = Column(Float)

    def __repr__(self):
        return "Disk Utilization for single physical node %s in experiment %s " % (self.node, self.experiment_id)
class NetInPnode(Base):
    """Inbound-network load sample for a single physical node."""

    __tablename__ = 'net_in_pnode_load'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('pnodes_net_in_load', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(String)
    node = Column(String)
    value = Column(Float)

    def __repr__(self):
        return "Net in Utilization for single physical node %s in experiment %s " % (self.node, self.experiment_id)
class NetOutPnode(Base):
    """Outbound-network load sample for a single physical node."""

    __tablename__ = 'net_out_pnode_load'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('pnodes_net_out_load', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(String)
    node = Column(String)
    value = Column(Float)

    def __repr__(self):
        # Consistency fix: every sibling *Pnode load model defines __repr__
        # in this format; this class was the only one missing it.
        return "Net out Utilization for single physical node %s in experiment %s " % (self.node, self.experiment_id)
class MemoryPnode(Base):
    """Memory load sample for a single physical node of an experiment."""

    __tablename__ = 'memory_pnode_load'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('pnodes_memory_load', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(String)
    node = Column(String)
    value = Column(Float)

    def __repr__(self):
        return "Memory Utilization for single physical node %s in experiment %s " % (self.node, self.experiment_id)
class LeaseStatistics(Base):
    """Per-lease waiting time and slowdown recorded for an experiment."""

    __tablename__ = 'lease_statistics'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('lease_statistics', cascade='all, delete, delete-orphan', order_by=id))
    lease_id = Column(Integer)
    waiting_time = Column(Float)
    slowdown = Column(Float)
class AcceptedAR(Base):
    """Timestamped running count of accepted AR leases in an experiment."""

    __tablename__ = 'accepted_ar'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('accepted_ars', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(Float)
    lease_id = Column(Integer)
    count = Column(Integer)
class AcceptedIM(Base):
    """Timestamped running count of accepted IM leases in an experiment."""

    __tablename__ = 'accepted_im'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('accepted_ims', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(Float)
    lease_id = Column(Integer)
    count = Column(Integer)
class RejectedAR(Base):
    """Timestamped running count of rejected AR leases in an experiment."""

    __tablename__ = 'rejected_ar'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('rejected_ars', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(Float)
    lease_id = Column(Integer)
    count = Column(Integer)
class RejectedIM(Base):
    """Timestamped running count of rejected IM leases in an experiment."""

    __tablename__ = 'rejected_im'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('rejected_ims', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(Float)
    lease_id = Column(Integer)
    count = Column(Integer)
class CompletedBE(Base):
    """Timestamped running count of completed BE leases in an experiment."""

    __tablename__ = 'completed_be'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('completed_bes', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(Float)
    lease_id = Column(Integer)
    count = Column(Integer)
class QueueSizeBE(Base):
    """Timestamped BE queue-size sample recorded for an experiment."""

    __tablename__ = 'queue_size_be'

    id = Column(Integer, primary_key=True)
    experiment_id = Column(Integer, ForeignKey('experiments.id', ondelete='CASCADE'))
    experiment = relationship("Experiment", backref=backref('queue_size_bes', cascade='all, delete, delete-orphan', order_by=id))
    time = Column(Float)
    lease_id = Column(Integer)
    count = Column(Integer)
| 39.162791
| 135
| 0.705909
| 780
| 6,736
| 5.848718
| 0.107692
| 0.131083
| 0.111793
| 0.067514
| 0.772907
| 0.772907
| 0.760412
| 0.752302
| 0.744191
| 0.744191
| 0
| 0
| 0.174436
| 6,736
| 172
| 136
| 39.162791
| 0.820356
| 0
| 0
| 0.518797
| 0
| 0
| 0.20944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.022556
| 0.045113
| 0.984962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
3c3352f2f9ab7f9b43d352013b807835e28ecd5d
| 148
|
py
|
Python
|
src/wai/json/error/_JSONSchemaError.py
|
waikato-datamining/wai-json
|
cb013fb16e7c1b8d91e040a387a143d29d4ced96
|
[
"MIT"
] | null | null | null |
src/wai/json/error/_JSONSchemaError.py
|
waikato-datamining/wai-json
|
cb013fb16e7c1b8d91e040a387a143d29d4ced96
|
[
"MIT"
] | 2
|
2020-07-30T22:41:42.000Z
|
2021-09-21T23:18:06.000Z
|
src/wai/json/error/_JSONSchemaError.py
|
waikato-datamining/wai-json
|
cb013fb16e7c1b8d91e040a387a143d29d4ced96
|
[
"MIT"
] | null | null | null |
from ._JSONError import JSONError
class JSONSchemaError(JSONError):
    """Root of the exception hierarchy for JSON-schema related failures."""
| 16.444444
| 52
| 0.689189
| 16
| 148
| 6.3125
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236486
| 148
| 8
| 53
| 18.5
| 0.893805
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3c7749796a32a2202bb3b4a118381b5a271e4853
| 247
|
py
|
Python
|
torchsketch/utils/__init__.py
|
songyzh/torchsketch
|
42bca1b31ab9699d9b6d77a102b1f46bba82fb33
|
[
"MIT"
] | 182
|
2020-03-25T01:59:11.000Z
|
2022-03-29T08:58:47.000Z
|
torchsketch/utils/__init__.py
|
songyzh/torchsketch
|
42bca1b31ab9699d9b6d77a102b1f46bba82fb33
|
[
"MIT"
] | 5
|
2020-03-25T13:16:50.000Z
|
2022-02-19T09:51:39.000Z
|
torchsketch/utils/__init__.py
|
songyzh/torchsketch
|
42bca1b31ab9699d9b6d77a102b1f46bba82fb33
|
[
"MIT"
] | 17
|
2020-03-25T12:40:49.000Z
|
2022-03-28T06:34:40.000Z
|
from torchsketch.utils import data_augmentation_utils
from torchsketch.utils import general_utils
from torchsketch.utils import metric_utils
from torchsketch.utils import self_supervised_utils
from torchsketch.utils import svg_specific_utils
| 41.166667
| 54
| 0.878543
| 33
| 247
| 6.333333
| 0.363636
| 0.358852
| 0.478469
| 0.62201
| 0.593301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101215
| 247
| 5
| 55
| 49.4
| 0.941441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3c7e541ee0be51f8533ccbb0260e6d7438d900de
| 145
|
py
|
Python
|
NF/Step/__init__.py
|
AWehenkel/Normalizing-Flows
|
fe535e25cda32781296557ac5a523a6d2ade1761
|
[
"BSD-3-Clause"
] | 9
|
2020-11-20T12:36:03.000Z
|
2022-03-21T03:18:12.000Z
|
NF/Step/__init__.py
|
AWehenkel/Normalizing-Flows
|
fe535e25cda32781296557ac5a523a6d2ade1761
|
[
"BSD-3-Clause"
] | null | null | null |
NF/Step/__init__.py
|
AWehenkel/Normalizing-Flows
|
fe535e25cda32781296557ac5a523a6d2ade1761
|
[
"BSD-3-Clause"
] | null | null | null |
from .NormalizingFlow import FCNormalizingFlow, NormalizingFlow, NormalizingFlowStep
from .AugmentedFlow import MNISTAugmentedFlow, MNISTBaseline
| 72.5
| 84
| 0.896552
| 11
| 145
| 11.818182
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 145
| 2
| 85
| 72.5
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b1e97f9b7c8983f955d23d83925f9dcc3bf049f8
| 28,787
|
py
|
Python
|
envdsys/envnet/registry/registry.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 1
|
2021-11-06T19:22:53.000Z
|
2021-11-06T19:22:53.000Z
|
envdsys/envnet/registry/registry.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 25
|
2019-06-18T20:40:36.000Z
|
2021-07-23T20:56:48.000Z
|
envdsys/envnet/registry/registry.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | null | null | null |
from abc import abstractmethod
import asyncio
from os import name
from shutil import register_archive_format
from typing import AsyncIterable
# from asgiref.sync import sync_to_async
from channels.db import database_sync_to_async
from django.core.exceptions import MultipleObjectsReturned
from shared.data.status import Status
from shared.data.namespace import Namespace
# from daq.daq import DAQ
# from envdaq import data
from envnet.models import Network, ServiceRegistration, DAQRegistration
class ServiceRegistry:
    """Registry of envDataSystem services attached to the local network.

    Registrations are persisted through the ``ServiceRegistration`` and
    ``Network`` Django models (all DB access is wrapped with
    ``database_sync_to_async``).  A background task periodically marks
    stale registrations DISCONNECTED and purges very old ones.
    """

    # Seconds of silence before a service registration is marked DISCONNECTED.
    disconnected_service_limit = 60
    disconnected_daq_limit = 10
    # Seconds of silence before a registration is removed from the registry.
    auto_clean_limit = 600  # 10 minutes
    local_network = None
    run_state = "STOPPED"

    @staticmethod
    async def start(network="default_network"):
        """Activate *network* and launch the status-check task (only once)."""
        await ServiceRegistry.start_registry(network)
        if ServiceRegistry.run_state != "RUNNING":
            asyncio.create_task(ServiceRegistry.check_status())
            ServiceRegistry.run_state = "RUNNING"

    @staticmethod
    @database_sync_to_async
    def start_registry(network="default_network"):
        """Deactivate every network, then (re)create and activate *network*."""
        print("starting service registry")
        nets = Network.objects.all()
        for net in nets:
            net.deactivate()
        try:
            net = Network.objects.get(name=network)
        except Network.MultipleObjectsReturned:
            # Duplicate rows: wipe them all and start from a clean record.
            result = Network.objects.filter(name=network)
            for s in result:
                s.delete()
            net = Network(name=network)
            net.save()
        except Network.DoesNotExist:
            net = Network(name=network)
            net.save()
        net.activate()
        ServiceRegistry.local_network = net

    @staticmethod
    def start_checks():
        """Spawn the background status-check task."""
        asyncio.create_task(ServiceRegistry.check_status())

    @staticmethod
    async def register(local=True, config=None):
        """Register a service; fall back to updating an existing record."""
        reg = await ServiceRegistry.register_no_wait(local, config)
        if not reg:
            reg = await ServiceRegistry.update_registration(local, config)
        return reg

    @staticmethod
    @database_sync_to_async
    def register_no_wait(local=True, config=None):
        """Create a registration for config['host']/config['port'].

        Returns the serialized registration, or None when a live record for
        the same host/port already exists (the caller then updates it via
        ``update_registration``).
        """
        print(f"register service: {local}, {config}")
        if config:
            print(config["host"])
            try:
                reg = ServiceRegistration.objects.get(
                    host=config["host"], port=config["port"]
                )
                if reg.get_age() > ServiceRegistry.auto_clean_limit:
                    # Stale leftover record: drop it and re-register below.
                    reg.delete()
                else:
                    return None
            except ServiceRegistration.MultipleObjectsReturned:
                result = ServiceRegistration.objects.filter(
                    host=config["host"], port=config["port"]
                )
                for s in result:
                    s.delete()
            except ServiceRegistration.DoesNotExist:
                pass
            reg = ServiceRegistration(
                local_service=local,
                host=config["host"],
                port=config["port"],
                status="CONNECTED"
            )
            reg.save()
            reg.add_services(config["service_list"])
            reg.join_network(ServiceRegistry.get_network_name(local, config))
            registration = reg.get_registration()
            return registration

    @staticmethod
    async def update_registration(local=True, config=None):
        """Async wrapper around ``update_registration_no_wait``."""
        reg = await ServiceRegistry.update_registration_no_wait(local, config)
        return reg

    @staticmethod
    @database_sync_to_async
    def update_registration_no_wait(local=True, config=None):
        """Refresh an existing host/port registration or create a new one.

        Returns the serialized registration, or None when *config* is falsy.
        """
        if config:
            try:
                reg = ServiceRegistration.objects.get(
                    host=config["host"], port=config["port"]
                )
                if reg.get_age() > ServiceRegistry.auto_clean_limit:
                    reg.delete()
                elif "regkey" in config and config["regkey"] != reg.regkey:
                    # BUGFIX: was `reg.regkey in config`, which tested the
                    # regkey *value* for dict membership instead of checking
                    # whether the "regkey" field was supplied.
                    reg.delete()
                else:
                    reg.local_service = local
                    reg.host = config["host"]
                    reg.port = config["port"]
                    reg.status = "CONNECTED"
                    reg.save(do_update=True)
                    reg.add_services(config["service_list"])
                    reg.join_network(
                        ServiceRegistry.get_network_name(local, config)
                    )
                    registration = reg.get_registration()
                    print(f"3:{registration}")
                    return registration
            except ServiceRegistration.DoesNotExist:
                pass
            # No (usable) existing record: create a fresh registration here
            # rather than bouncing back to register() and looping.
            reg = ServiceRegistration(
                local_service=local,
                host=config["host"],
                port=config["port"],
                status="CONNECTED"
            )
            reg.add_services(config["service_list"])
            reg.save(do_update=True)
            reg.join_network(ServiceRegistry.get_network_name(local, config))
            registration = reg.get_registration()
            print(f"4:{registration}")
            return registration

    @staticmethod
    async def unregister(local=True, config=None):
        """Async wrapper around ``unregister_no_wait``."""
        await ServiceRegistry.unregister_no_wait(local, config)

    @staticmethod
    @database_sync_to_async
    def unregister_no_wait(local=True, config=None):
        """Delete the registration matching config['host']/config['port']."""
        if config:
            try:
                srv = ServiceRegistration.objects.get(
                    host=config["host"], port=config["port"]
                )
                srv.delete()
            except ServiceRegistration.DoesNotExist:
                pass

    @staticmethod
    async def ping(local=True, config=None):
        """Async wrapper around ``ping_no_wait``."""
        await ServiceRegistry.ping_no_wait(local, config)

    @staticmethod
    @database_sync_to_async
    def ping_no_wait(local=True, config=None):
        """Mark the matching registration CONNECTED and refresh its timestamp."""
        if config:
            try:
                reg = ServiceRegistration.objects.get(
                    host=config["host"], port=config["port"]
                )
                reg.status = "CONNECTED"
                reg.save(do_update=True)  # update modified time stamp
            except ServiceRegistration.DoesNotExist:
                pass

    @staticmethod
    def get_network_name(local=True, config=None):
        """Resolve the network name for a registration.

        Local services join the active local network; remote ones use
        config['network'] when present, otherwise 'default'.
        """
        # Robustness fix: previously `name` was unbound (UnboundLocalError)
        # when local was False and config was None.
        name = "default"
        if local:
            name = ServiceRegistry.local_network.name
        elif config:
            try:
                name = config["network"]
            except KeyError:
                pass
        return name

    @staticmethod
    @database_sync_to_async
    def clean_registrations():
        """Expire or drop stale remote registrations on the local network."""
        regs = ServiceRegistration.objects.filter(
            local_service=False, network=ServiceRegistry.local_network
        )
        for reg in regs:
            if reg.get_age() > ServiceRegistry.auto_clean_limit:
                print(f"removing registration for {reg} due to auto timeout")
                reg.delete()
            elif reg.get_age() > ServiceRegistry.disconnected_service_limit:
                reg.status = "DISCONNECTED"
                print(reg.status)
                reg.save()

    @staticmethod
    async def check_status():
        """Background loop: sweep stale registrations every 2 seconds."""
        print("check_status")
        while True:
            await ServiceRegistry.clean_registrations()
            await asyncio.sleep(2)
class DAQRegistry:
# number of seconds before daq_server is considered
# disconnected
# disconnected_service_limit = 60
disconnected_limit = 10
# number of seconds before daq_server is removed
# from registry
auto_clean_limit = 600 # 10 minutes
local_network = None
run_state = "STOPPED"
# @staticmethod
# async def start(network="default_network"):
# await ServiceRegistry.start_no_wait(network)
@staticmethod
async def start():
print("starting daq registry")
# await DAQRegistry.start_registry()
# loop=asyncio.get_event_loop()
# print(f"registry: {loop}")
if DAQRegistry.run_state != "RUNNING":
while not ServiceRegistry.local_network:
print("waiting for service registry to spin up")
await asyncio.sleep(0.5)
DAQRegistry.local_network = ServiceRegistry.local_network
await DAQRegistry.clear()
asyncio.create_task(DAQRegistry.check_status())
DAQRegistry.run_state = "RUNNING"
# regs = await ServiceRegistry.get_all_registrations()
# print(regs)
# print(f"all reg: {ServiceRegistry.get_all_registrations()}")
@staticmethod
async def clear():
await DAQRegistry.clear_no_wait()
@staticmethod
@database_sync_to_async
def clear_no_wait():
regs = DAQRegistration.objects.all()
for reg in regs:
reg.delete()
# @staticmethod
# @database_sync_to_async
# def start_registry():
# print("starting registry")
# try:
# net = Network.objects.get(name=network)
# net.activate()
# except Network.MultipleObjectsReturned:
# result = Network.objects.filter(name=network)
# for s in result:
# s.delete()
# except Network.DoesNotExist:
# net = Network(name=network)
# # net = Network(name=network)
# net.save()
# net.activate()
# ServiceRegistry.local_network = net
# # asyncio.create_task(ServiceRegistry.check_status())
# # ServiceRegistry.run_state = "RUNNING"
# # start broadcasting
# # start housekeeping checks
# # if ServiceRegistry.run_state == "STOPPED":
# # ServiceRegistry.start_checks()
# # ServiceRegistry.run_state = "RUNNING"
# # return net
# def start_checks():
# # loop = asyncio.get_event_loop()
# # task = asyncio.ensure_future(ServiceRegistry.check_status())
# # print(task)
# # loop.run_until_complete(task)
# asyncio.create_task(DAQRegistry.check_status())
# async def register(
# reg_id="default",
# reg_id2=None,
# namespace={},
# type="DAQServer",
# config={},
# config2={},
# ):
@staticmethod
async def register(
reg_id=Namespace().get_namespace_sig(),
namespace=Namespace().to_dict(),
type=Namespace.DAQSERVER,
config=dict(),
):
# if not reg_id2:
# reg_id2 = Namespace().get_namespace_sig()
# registration = await DAQRegistry.register_no_wait(
# reg_id=reg_id,
# reg_id2=reg_id2,
# namespace=namespace,
# type=type,
# config=config,
# config2=config2,
# )
registration = await DAQRegistry.register_no_wait(
reg_id=reg_id,
namespace=namespace,
type=type,
config=config,
)
# if not registration:
# registration = await DAQRegistry.update_registration(namespace, type, config)
return registration
# @database_sync_to_async
# def register_no_wait(
# reg_id="default",
# reg_id2=None,
# namespace={},
# type="DAQServer",
# config={},
# config2={},
# ):
@staticmethod
@database_sync_to_async
def register_no_wait(
reg_id=Namespace().get_namespace_sig(),
namespace=Namespace().to_dict(),
type=Namespace.DAQSERVER,
config=dict(),
):
# print(f"register daq: {config}")
# if config:
# print(config["host"])
# if not reg_id2:
# reg_id2 = Namespace().get_namespace_sig()
try:
# print(f'{config["HOST"]}, {config["PORT"]}')
# registration = DAQRegistration.objects.get(reg_id2=reg_id2, daq_type=type)
registration = DAQRegistration.objects.get(reg_id=reg_id, daq_type=type)
if registration:
registration.delete()
except DAQRegistration.MultipleObjectsReturned:
# result = DAQRegistration.objects.filter(reg_id2=reg_id2, daq_type=type)
result = DAQRegistration.objects.filter(reg_id=reg_id, daq_type=type)
for s in result:
s.delete()
except DAQRegistration.DoesNotExist:
pass
network = "default"
# if local:
# network = ServiceRegistry.local_network.name
# else:
# try:
# network = config["network"]
# except KeyError:
# pass # defaults to "default"
# create new Reg
# registration = DAQRegistration(
# reg_id=reg_id,
# reg_id2=reg_id2,
# namespace=namespace,
# daq_type=type,
# config=config,
# config2=config2,
# status="CONNECTED",
# )
status2 = Status()
status2.set_connection_status(Status.CONNECTED)
print(f"register: {reg_id}, {namespace}")
registration = DAQRegistration(
reg_id=reg_id,
namespace=namespace,
daq_type=type,
config=config,
status="CONNECTED",
status2=status2.to_dict(),
)
registration.save()
# TODO: update service definition to include this reg
registration = registration.get_registration()
return registration
# @staticmethod
# async def update_registration(
# reg_id="default",
# reg_id2=Namespace().get_namespace_sig(),
# namespace={},
# type="DAQServer",
# registration=None,
# ):
@staticmethod
async def update_registration(
reg_id=Namespace().get_namespace_sig(),
namespace=Namespace().to_dict(),
type=Namespace.DAQSERVER,
config=dict(),
registration=None,
):
# reg = await DAQRegistry.update_registration_no_wait(
# reg_id=reg_id,
# reg_id2=reg_id2,
# namespace=namespace,
# type=type,
# registration=registration,
# )
reg = await DAQRegistry.update_registration_no_wait(
reg_id=reg_id,
namespace=namespace,
type=type,
registration=registration,
)
return reg
# @staticmethod
# @database_sync_to_async
# def update_registration_no_wait(
# reg_id="default",
# reg_id2=Namespace().get_namespace_sig(),
# namespace={},
# type="DAQServer",
# registration=None,
# ):
@staticmethod
@database_sync_to_async
def update_registration_no_wait(
reg_id=Namespace().get_namespace_sig(),
namespace=Namespace().to_dict(),
type=Namespace.DAQSERVER,
config=dict(),
registration=None,
):
# if config:
# network = "default"
# if local:
# network = ServiceRegistry.local_network.name
# else:
# try:
# network = config["network"]
# except KeyError:
# pass # defaults to "default"
try:
# srv = ServiceRegistration.objects.get(regkey=config["regkey"])
# reg = DAQRegistration.objects.get(reg_id2=reg_id2, daq_type=type)
reg = DAQRegistration.objects.get(reg_id=reg_id, daq_type=type)
except DAQRegistration.DoesNotExist:
reg = None
# if reg.get_age() > DAQRegistry.auto_clean_limit:
# reg.delete()
# elif config and reg.regkey in config and config["regkey"] != reg.regkey:
# reg.delete()
# else:
config = {}
# regkey = None
if registration:
config = registration["config"]
# config2 = registration["config2"]
# regkey = registration["regkey"]
if not reg:
# reg = DAQRegistration(
# reg_id2=reg_id2,
# namespace=namespace,
# daq_type=type,
# config=config,
# config2=config2,
# )
reg = DAQRegistration(
reg_id=reg_id,
namespace=namespace,
daq_type=type,
config=config,
)
if reg:
# reg.reg_id2 = reg_id2
reg.reg_id = reg_id
reg.namespace = namespace
reg.daq_type = type
reg.config = config
# reg.config2 = config2
reg.status = "CONNECTED"
status2 = Status()
status2.set_connection_status(Status.CONNECTED)
reg.status2 = status2.to_dict()
# if regkey:
# reg.regkey = regkey
# srv.service_list = config.service_list
reg.save(do_update=True)
# TODO: update service
# reg.add_services(config["service_list"])
# srv.save()
# if local:
# ServiceRegistry.local_network.add_registration(srv)
# reg.join_network(ServiceRegistry.get_network_name(local, config))
return reg.get_registration()
# print(f"3:{registration}")
# return registration
else:
return None
# # create new Reg here don't want to pass back to add ang get caught in loop?
# reg = DAQRegistration(
# namespace=namespace, daq_type=type, config=config, status="CONNECTED"
# )
# reg.save()
# # TODO: update service
# registration = reg.get_registration()
# # print(f"4:{registration}")
# return registration
# @staticmethod
# async def unregister(
# reg_id="default", reg_id2=Namespace().get_namespace_sig(), type="DAQServer"
# ):
# await DAQRegistry.unregister_no_wait(reg_id=reg_id, reg_id2=reg_id2, type=type)
    @staticmethod
    async def unregister(
        reg_id=Namespace().get_namespace_sig(), type=Namespace.DAQSERVER
    ):
        """Remove the registration for (reg_id, type) from the registry.

        Thin async wrapper around unregister_no_wait().

        NOTE(review): the reg_id default is evaluated once at class
        definition time, not per call -- confirm this is intended.
        """
        await DAQRegistry.unregister_no_wait(reg_id=reg_id, type=type)
# @staticmethod
# @database_sync_to_async
# def unregister_no_wait(
# reg_id="default", reg_id2=Namespace().get_namespace_sig(), type="DAQServer"
# ):
    @staticmethod
    @database_sync_to_async
    def unregister_no_wait(
        reg_id=Namespace().get_namespace_sig(), type=Namespace.DAQSERVER
    ):
        """Synchronously delete the registration matching (reg_id, type).

        A missing registration is not treated as an error.

        NOTE(review): the reg_id default is evaluated once at class
        definition time, not per call -- confirm this is intended.
        """
        try:
            reg = DAQRegistration.objects.get(reg_id=reg_id, daq_type=type)
            reg.delete()
        except DAQRegistration.DoesNotExist:
            # Nothing to do if the id was never registered (or already removed).
            pass
# @staticmethod
# async def get_registration(reg_id="default", reg_id2=Namespace().get_namespace_sig(), type="DAQServer"):
# registration = await DAQRegistry.get_registration_no_wait(reg_id=reg_id, reg_id2=reg_id2, type=type)
# return registration
@staticmethod
async def get_registration(
reg_id=Namespace().get_namespace_sig(), type=Namespace.DAQSERVER
):
# registration = await DAQRegistry.get_registration_no_wait(
# reg_id=reg_id, reg_id2=reg_id2, type=type
# )
registration = await DAQRegistry.get_registration_no_wait(
reg_id=reg_id, type=type
)
return registration
# @staticmethod
# @database_sync_to_async
# def get_registration_no_wait(
# reg_id="default", reg_id2=Namespace().get_namespace_sig(), type="DAQServer"
# ):
@staticmethod
@database_sync_to_async
def get_registration_no_wait(
reg_id=Namespace().get_namespace_sig(), type=Namespace.DAQSERVER
):
# theoretically, we should not be pinging local here
# if not regkey and config and (regkey in config):
# regkey = config["regkey"]
# if regkey:
print(f"get_reg: {reg_id}")
try:
# reg = DAQRegistration.objects.get(reg_id2=reg_id2, daq_type=type)
reg = DAQRegistration.objects.get(reg_id=reg_id, daq_type=type)
# reg.status = "CONNECTED"
# srv = ServiceRegistration.objects.get(regkey=config["regkey"])
# reg.save(do_update=True) # update modified time stamp
return reg.get_registration()
except DAQRegistration.DoesNotExist:
pass
return None
# def ping(local=True, regkey=None, config=None):
# @staticmethod
# async def ping(
# reg_id="default", reg_id2=Namespace().get_namespace_sig(), type="DAQServer"
# ):
# await DAQRegistry.ping_no_wait(reg_id, type)
    @staticmethod
    async def ping(
        reg_id=Namespace().get_namespace_sig(), type=Namespace.DAQSERVER
    ):
        """Async heartbeat: refresh the connection state for (reg_id, type)."""
        await DAQRegistry.ping_no_wait(reg_id, type)
# @staticmethod
# @database_sync_to_async
# def ping_no_wait(
# reg_id="default", reg_id2=Namespace().get_namespace_sig(), type="DAQServer"
# ):
    @staticmethod
    @database_sync_to_async
    def ping_no_wait(
        reg_id=Namespace().get_namespace_sig(), type=Namespace.DAQSERVER
    ):
        """Refresh the heartbeat for a registration.

        Marks the matching registration CONNECTED (both the legacy string
        status and the Status dict) and saves it so its modified timestamp
        advances; clean_registrations() uses that timestamp to age entries.
        Unknown reg_ids are silently ignored.

        NOTE(review): the reg_id default is evaluated once at class
        definition time, not per call -- confirm this is intended.
        """
        try:
            reg = DAQRegistration.objects.get(reg_id=reg_id, daq_type=type)
            reg.status = "CONNECTED"
            # Rebuild the structured status from the stored dict, flip it to
            # CONNECTED, and persist the serialized form.
            status2 = Status().from_dict(reg.status2)
            status2.set_connection_status(Status.CONNECTED)
            reg.status2 = status2.to_dict()
            reg.save(do_update=True)  # update modified time stamp
        except DAQRegistration.DoesNotExist:
            # Pinging an unregistered id is not an error.
            pass
    @staticmethod
    async def get_registry(type=Namespace.DAQSERVER):
        """Return a mapping of all registrations of the given DAQ type."""
        return await DAQRegistry.get_registry_no_wait(type=type)
@staticmethod
@database_sync_to_async
def get_registry_no_wait(type=Namespace.DAQSERVER):
try:
regs = DAQRegistration.objects.filter(daq_type=type)
# print(f"regs: {regs}")
except DAQRegistration.DoesNotexist:
# TODO: return 404 ... lookup how
pass
regs = []
daq_registration_map = {}
if regs:
for reg in regs:
# id2 = reg.reg_id2
print(f"reg: {reg}")
daq_registration_map[f"{reg}"] = reg.get_registration()
return daq_registration_map
    @staticmethod
    async def check_status():
        """Background task: periodically sweep the registry.

        Runs forever, calling clean_registrations() every 2 seconds to
        expire stale entries.  Intended to be scheduled once on the event
        loop; it never returns.
        """
        print("check_status")
        while True:
            await DAQRegistry.clean_registrations()
            await asyncio.sleep(2)
@staticmethod
@database_sync_to_async
def clean_registrations():
# regs = database_sync_to_async(ServiceRegistration.objects.filter)(
# network=ServiceRegistry.local_network
# )
# regs = DAQRegistration.objects.filter(
# local_service=False, network=ServiceRegistry.local_network
# )
regs = DAQRegistration.objects.all()
# regs = None
print(f"cleaning regs: {regs}")
# return regs
for reg in regs:
print(f"status: {reg}, age: {reg.get_age()}")
# print(f"check status: {reg}")
if reg.get_age() > DAQRegistry.auto_clean_limit:
print(f"removing registration for {reg} due to auto timeout")
reg.delete()
elif reg.get_age() > DAQRegistry.disconnected_limit:
reg.status = "DISCONNECTED"
status2 = Status().from_dict(reg.status2)
status2.set_connection_status(Status.NOT_CONNECTED)
print(reg.status)
reg.save()
| 35.020681
| 110
| 0.577552
| 2,865
| 28,787
| 5.608377
| 0.067016
| 0.020538
| 0.013941
| 0.029562
| 0.823625
| 0.779375
| 0.746079
| 0.71882
| 0.700523
| 0.669032
| 0
| 0.005264
| 0.326849
| 28,787
| 821
| 111
| 35.063337
| 0.823924
| 0.365755
| 0
| 0.637255
| 0
| 0
| 0.039891
| 0
| 0
| 0
| 0
| 0.001218
| 0
| 1
| 0.039216
| false
| 0.026961
| 0.02451
| 0
| 0.132353
| 0.044118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3ca7c24956f1449d19480d48467dc05529a44e06
| 21
|
py
|
Python
|
nanome/_internal/_volumetric/_io/__init__.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
nanome/_internal/_volumetric/_io/__init__.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
nanome/_internal/_volumetric/_io/__init__.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
from . import _em_map
| 21
| 21
| 0.809524
| 4
| 21
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
596ff737a198e40790c330402cf8d51db960b0d1
| 25
|
py
|
Python
|
src/gui/__init__.py
|
Airthee/LightshotSniffer
|
466abb28710c9401e6f3fd0e1d61776511f936bc
|
[
"WTFPL"
] | 1
|
2020-04-30T08:32:26.000Z
|
2020-04-30T08:32:26.000Z
|
src/gui/__init__.py
|
Airthee/LightshotSniffer
|
466abb28710c9401e6f3fd0e1d61776511f936bc
|
[
"WTFPL"
] | 12
|
2019-06-18T06:04:58.000Z
|
2022-01-13T01:20:36.000Z
|
src/gui/__init__.py
|
Airthee/LightshotSniffer
|
466abb28710c9401e6f3fd0e1d61776511f936bc
|
[
"WTFPL"
] | 3
|
2019-06-18T06:12:53.000Z
|
2020-12-03T08:46:44.000Z
|
from .lsswindow import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59722ce3fb1d4a273cd95ac699ae4274f0b2b163
| 112
|
py
|
Python
|
orb_simulator/orbsim_language/orbsim_ast/start_sim_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 1
|
2022-01-19T22:49:09.000Z
|
2022-01-19T22:49:09.000Z
|
orb_simulator/orbsim_language/orbsim_ast/start_sim_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 15
|
2021-11-10T14:25:02.000Z
|
2022-02-12T19:17:11.000Z
|
orb_simulator/orbsim_language/orbsim_ast/start_sim_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | null | null | null |
from orbsim_language.orbsim_ast.statement_node import StatementNode
class StartSimNode(StatementNode):
    """Marker AST node (a StatementNode with no extra fields).

    Presumably represents the 'start simulation' statement in the orbsim
    language -- no behavior of its own is visible here.
    """
    pass
| 28
| 67
| 0.857143
| 13
| 112
| 7.153846
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 112
| 4
| 68
| 28
| 0.920792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5988d7946471e74d4a8d62bff737f7c847bd909a
| 4,053
|
py
|
Python
|
spvcm/ops.py
|
weikang9009/spvcm
|
00ec35331e0e1a67bcd841a6b3761a23099617f7
|
[
"MIT"
] | 14
|
2017-06-20T18:39:04.000Z
|
2021-03-27T02:21:46.000Z
|
spvcm/ops.py
|
weikang9009/spvcm
|
00ec35331e0e1a67bcd841a6b3761a23099617f7
|
[
"MIT"
] | 12
|
2018-05-11T11:13:21.000Z
|
2020-02-07T14:23:12.000Z
|
spvcm/ops.py
|
weikang9009/spvcm
|
00ec35331e0e1a67bcd841a6b3761a23099617f7
|
[
"MIT"
] | 8
|
2017-05-20T00:55:40.000Z
|
2020-07-02T14:52:49.000Z
|
import scipy.sparse as spar
import scipy.sparse.linalg as spla
import theano.tensor as tt
import theano.sparse as ts
import theano as th
from theano.gof import Apply
from theano import Op
import numpy as np
import theano as th
# define this as if it's in terms of rho and W, and give the derivatives in
# terms of rho, since that's what the graph is expecting
class Sparse_LapDet(Op):
    """Sparse Matrix Determinant of a Laplacian Matrix using Sparse LU
    Decomposition.

    Theano Op computing ld(rho) = log|det(I - rho*W)| for a fixed weights
    matrix W, with an exact dense gradient with respect to rho.
    """

    def __init__(self, W):
        # Cache sparse and dense copies of W and the identity once, so
        # perform() and grad() avoid repeated conversions.
        self.W = spar.csc_matrix(W)
        self.I = spar.identity(W.shape[0]).tocsc()
        self.Id = self.I.toarray()
        self.Wd = self.W.toarray()

    def make_node(self, rho):
        # Scalar rho in -> scalar log-determinant out (same dtype as rho).
        rho = tt.as_tensor(rho)
        ld = tt.scalar(dtype=rho.dtype)
        return Apply(self, [rho], [ld])

    def perform(self, node, inputs, outputs):
        (rho,) = inputs
        (z, ) = outputs
        rW = rho * self.W
        A = self.I - rW
        # For the sparse LU factorization A = L*U (L unit-diagonal),
        # log|det A| = sum(log|U_ii|).
        Ud = spla.splu(A).U.diagonal()
        ld = np.asarray(np.sum(np.log(np.abs(Ud))))
        z[0] = ld

    def grad(self, inputs, g_outputs):
        (rho, ) = inputs
        (gz,) = g_outputs
        # d/d rho log det(I - rho*W) = tr((I - rho*W)^-1 . (-W)); the trace
        # is formed as the sum of the elementwise product below.
        A = self.Id - tt.mul(rho, self.Wd)
        dinv = tt.nlinalg.matrix_inverse(A).T
        out = tt.mul(dinv, - self.Wd)
        return [tt.as_tensor(tt.sum(tt.mul(out, gz)), ndim=1)]
# define this as if it's in terms of rho and W, and give the derivatives in
# terms of rho, since that's what the graph is expecting
class Sparse_AGrad_LapDet(Op):
    """Sparse Matrix Determinant of a Laplacian Matrix using Sparse LU
    Decomposition.

    Same forward pass as Sparse_LapDet, but the gradient approximates
    (I - rho*W)^-1 with a truncated Neumann series
    I + rho*W + rho^2*W^2 + rho^3*W^3, keeping the work sparse.

    Fix: removed a dead statement in grad() -- the dense
    ``A = self.Id - tt.mul(rho, self.Wd)`` was computed and never used
    (the series-based ``dinv`` replaces the explicit inverse).
    """

    def __init__(self, W):
        # Precompute sparse powers of W for the series expansion in grad().
        self.W = spar.csc_matrix(W)
        self.WW = W.dot(W)
        self.WWW = self.WW.dot(W)
        self.I = spar.identity(W.shape[0]).tocsc()
        self.Id = self.I.toarray()
        self.Wd = self.W.toarray()

    def make_node(self, rho):
        # Scalar rho in -> scalar log-determinant out (same dtype as rho).
        rho = tt.as_tensor(rho)
        ld = tt.scalar(dtype=rho.dtype)
        return Apply(self, [rho], [ld])

    def perform(self, node, inputs, outputs):
        (rho,) = inputs
        (z, ) = outputs
        rW = rho * self.W
        A = self.I - rW
        # log|det A| = sum(log|U_ii|) from the sparse LU factorization.
        Ud = spla.splu(A).U.diagonal()
        ld = np.asarray(np.sum(np.log(np.abs(Ud))))
        z[0] = ld

    def grad(self, inputs, g_outputs):
        (rho, ) = inputs
        (gz,) = g_outputs
        # Truncated Neumann series approximation of (I - rho*W)^-1.
        dinv = self.I + ts.mul_s_d(self.W, rho)
        dinv += ts.mul_s_d(self.WW, rho**2)
        dinv += ts.mul_s_d(self.WWW, rho**3)
        out = tt.mul(dinv, - self.Wd)
        return [tt.as_tensor(tt.sum(tt.mul(out, gz)), ndim=1)]
class Dense_LULogDet(Op):
    """Log Determinant of a matrix by sparse LU decomposition,
    from dense inputs. Use when casting has no significant overhead."""

    def make_node(self, A):
        # Dense matrix in -> scalar log-determinant out (same dtype as A).
        A = tt.as_tensor(A)
        ld = tt.scalar(dtype=A.dtype)
        return Apply(self, [A], [ld])

    def perform(self, node, inputs, outputs):
        (A,) = inputs
        (z,) = outputs
        # Cast to CSC so scipy's sparse LU can factor it; log|det| is then
        # the sum of log|U_ii| (L carries a unit diagonal).
        As = spar.csc_matrix(A)
        Ud = spla.splu(As).U.diagonal()
        ld = np.sum(np.log(np.abs(Ud)))
        z[0] = ld

    def grad(self, inputs, g_outputs):
        [gz] = g_outputs
        [A] = inputs
        # d(log det A)/dA = (A^-1)^T, scaled by the upstream gradient gz.
        dinv = tt.nlinalg.matrix_inverse(A).T
        dout = tt.dot(gz, dinv)
        return [dout]
class Dense_LogDet(Op):
    """Log Determinant of a matrix using numpy.linalg.slogdet.
    Use as a reference implementation"""

    def make_node(self, A):
        # Dense matrix in -> scalar log-determinant out (same dtype as A).
        A = tt.as_tensor(A)
        ld = tt.scalar(dtype=A.dtype)
        return Apply(self, [A], [ld])

    def perform(self, node, inputs, outputs):
        (A,) = inputs
        (z,) = outputs
        sgn, ld = np.linalg.slogdet(A)
        # For real input slogdet returns sgn in {-1, 0, 1}; anything else
        # indicates a problem with the factorization.
        if sgn not in [-1,0,1]:
            raise Exception('Loss of precision in log determinant')
        # NOTE(review): multiplying the log-magnitude by the sign negates
        # log|det| for negative determinants rather than recovering
        # log(det) -- confirm this is the intended convention.
        ld *= sgn
        z[0] = ld

    def grad(self, inputs, g_outputs):
        [gz] = g_outputs
        [A] = inputs
        # d(log det A)/dA = (A^-1)^T, scaled by the upstream gradient gz.
        dinv = tt.nlinalg.matrix_inverse(A).T
        dout = tt.dot(gz, dinv)
        return [dout]
| 31.664063
| 75
| 0.572909
| 622
| 4,053
| 3.663987
| 0.188103
| 0.019746
| 0.026327
| 0.021062
| 0.77183
| 0.767003
| 0.7319
| 0.721369
| 0.721369
| 0.721369
| 0
| 0.004528
| 0.291636
| 4,053
| 127
| 76
| 31.913386
| 0.789272
| 0.154207
| 0
| 0.77
| 0
| 0
| 0.010645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14
| false
| 0
| 0.09
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
59be6ef2e827a13e81112509c8fb433ce1b2cb25
| 92
|
py
|
Python
|
wpa_project/membership/views/__init__.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | 1
|
2022-01-03T02:46:34.000Z
|
2022-01-03T02:46:34.000Z
|
wpa_project/membership/views/__init__.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | 31
|
2021-12-29T17:43:06.000Z
|
2022-03-25T01:03:17.000Z
|
wpa_project/membership/views/__init__.py
|
s-amundson/wpa_2p1
|
43deb859123e5ef2eab3652e403c8d2f53d43b77
|
[
"MIT"
] | null | null | null |
from .level_view import LevelApiView, LevelView
from .membership_view import MembershipView
| 30.666667
| 47
| 0.869565
| 11
| 92
| 7.090909
| 0.727273
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097826
| 92
| 2
| 48
| 46
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59cf6604e332531c6c5efa4d6d9ed9bc1d32febe
| 77
|
py
|
Python
|
optim/__init__.py
|
jpeg729/pytorch-bits
|
5d107094042c27472dfb7dee77506b603f5d3e45
|
[
"MIT"
] | 73
|
2017-12-29T14:43:16.000Z
|
2021-08-13T02:20:33.000Z
|
optim/__init__.py
|
jpeg729/pytorch-bits
|
5d107094042c27472dfb7dee77506b603f5d3e45
|
[
"MIT"
] | null | null | null |
optim/__init__.py
|
jpeg729/pytorch-bits
|
5d107094042c27472dfb7dee77506b603f5d3e45
|
[
"MIT"
] | 5
|
2017-12-30T14:07:39.000Z
|
2021-08-13T02:20:34.000Z
|
from .cocob import COCOB
from .adam_hd import Adam_HD, Adam_HD_lr_per_param
| 19.25
| 50
| 0.831169
| 15
| 77
| 3.866667
| 0.533333
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 77
| 3
| 51
| 25.666667
| 0.865672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59d47b26e3df82f245286d8bcdc55944f48ad58f
| 39
|
py
|
Python
|
zipf/factories/zipf_from_url/__init__.py
|
LucaCappelletti94/zipf
|
956c3a1d56958384a02d5bb4671c6883cd9a25e3
|
[
"MIT"
] | 3
|
2018-11-07T01:56:09.000Z
|
2020-05-31T12:24:09.000Z
|
zipf/factories/zipf_from_url/__init__.py
|
LucaCappelletti94/zipf
|
956c3a1d56958384a02d5bb4671c6883cd9a25e3
|
[
"MIT"
] | 1
|
2018-05-15T15:58:06.000Z
|
2018-05-15T15:58:06.000Z
|
zipf/factories/zipf_from_url/__init__.py
|
LucaCappelletti94/zipf
|
956c3a1d56958384a02d5bb4671c6883cd9a25e3
|
[
"MIT"
] | null | null | null |
from .zipf_from_url import ZipfFromUrl
| 19.5
| 38
| 0.871795
| 6
| 39
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab6fb41dbd60bf1d4f3293db88733722b676151d
| 6,894
|
py
|
Python
|
dual_encoder/model_utils_test.py
|
garyxcheng/federated
|
ba7133ead6127af71ea9356e26bfd05c02f8324a
|
[
"Apache-2.0"
] | 330
|
2020-09-14T23:10:16.000Z
|
2022-03-30T19:49:19.000Z
|
dual_encoder/model_utils_test.py
|
garyxcheng/federated
|
ba7133ead6127af71ea9356e26bfd05c02f8324a
|
[
"Apache-2.0"
] | 52
|
2020-09-30T06:10:51.000Z
|
2022-03-31T19:25:16.000Z
|
dual_encoder/model_utils_test.py
|
garyxcheng/federated
|
ba7133ead6127af71ea9356e26bfd05c02f8324a
|
[
"Apache-2.0"
] | 119
|
2020-09-24T04:54:46.000Z
|
2022-03-31T21:46:57.000Z
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow as tf
from dual_encoder import model_utils as utils
class UtilsTest(absltest.TestCase):
  """Unit tests for the dual_encoder.model_utils embedding helpers.

  Each test feeds small hand-built tensors through the utils functions and
  checks the result with tf.debugging.assert_near.  The constant 0.7071067
  is 1/sqrt(2), i.e. the L2-normalized value of a [1, 1] row.
  """

  def test_get_predicted_embeddings_with_l2_normalize(self):
    # First two rows become context embeddings, the rest label embeddings;
    # both are L2-normalized.
    y_pred = tf.constant(
        [[1, 0],
         [1.0, 1.0],
         [1, 1],
         [0, 1],
         [1, 0],
         [1, 0]]
    )
    y_true = tf.constant([[2], [3]])
    context_embeddings, label_embeddings = utils.get_predicted_embeddings(
        y_pred, y_true, normalization_fn=utils.l2_normalize_fn)
    expected_context_embeddings = tf.constant(
        [[1.0, 0.0],
         [0.7071067, 0.7071067]]
    )
    expected_label_embeddings = tf.constant(
        [[0.7071067, 0.7071067],
         [0.0, 1.0],
         [1.0, 0.0],
         [1.0, 0.0]]
    )
    tf.debugging.assert_near(context_embeddings, expected_context_embeddings)
    tf.debugging.assert_near(label_embeddings, expected_label_embeddings)

  def test_get_predicted_embeddings_without_normalization(self):
    # Same split as above but normalization_fn=None leaves rows unchanged.
    y_pred = tf.constant(
        [[1, 0],
         [1.0, 1.0],
         [1, 1],
         [0, 1],
         [1, 0],
         [1, 0]]
    )
    y_true = tf.constant([[2], [3]])
    context_embeddings, label_embeddings = utils.get_predicted_embeddings(
        y_pred, y_true, normalization_fn=None)
    expected_context_embeddings = tf.constant(
        [[1.0, 0.0],
         [1.0, 1.0]]
    )
    expected_label_embeddings = tf.constant(
        [[1.0, 1.0],
         [0.0, 1.0],
         [1.0, 0.0],
         [1.0, 0.0]]
    )
    tf.debugging.assert_near(context_embeddings, expected_context_embeddings)
    tf.debugging.assert_near(label_embeddings, expected_label_embeddings)

  def test_get_embeddings_and_similarities(self):
    # Default path: embeddings are L2-normalized, similarities are the
    # context x label inner products (cosine similarity).
    y_pred = tf.constant(
        [[1, 0],
         [1.0, 1.0],
         [1, 1],
         [0, 1],
         [1, 0],
         [1, 0]]
    )
    y_true = tf.constant([[2], [3]])
    context_embeddings, label_embeddings, similarities = (
        utils.get_embeddings_and_similarities(y_pred, y_true))
    expected_context_embeddings = tf.constant(
        [[1.0, 0.0],
         [0.7071067, 0.7071067]]
    )
    expected_label_embeddings = tf.constant(
        [[0.7071067, 0.7071067],
         [0.0, 1.0],
         [1.0, 0.0],
         [1.0, 0.0]]
    )
    expected_similarities = tf.constant(
        [[0.7071067, 0.0, 1.0, 1.0],
         [1.0, 0.7071067, 0.7071067, 0.7071067]]
    )
    tf.debugging.assert_near(context_embeddings, expected_context_embeddings)
    tf.debugging.assert_near(label_embeddings, expected_label_embeddings)
    tf.debugging.assert_near(similarities, expected_similarities)

  def test_get_embeddings_and_similarities_dot_product(self):
    # normalization_fn=None: similarities become raw dot products.
    y_pred = tf.constant(
        [[1, 0],
         [1.0, 1.0],
         [1, 1],
         [0, 1],
         [1, 0],
         [1, 0]]
    )
    y_true = tf.constant([[2], [3]])
    context_embeddings, label_embeddings, similarities = (
        utils.get_embeddings_and_similarities(
            y_pred, y_true, normalization_fn=None))
    expected_context_embeddings = tf.constant(
        [[1.0, 0.0],
         [1.0, 1.0]]
    )
    expected_label_embeddings = tf.constant(
        [[1.0, 1.0],
         [0.0, 1.0],
         [1.0, 0.0],
         [1.0, 0.0]]
    )
    expected_similarities = tf.constant(
        [[1.0, 0.0, 1.0, 1.0],
         [2.0, 1.0, 1.0, 1.0]]
    )
    tf.debugging.assert_near(context_embeddings, expected_context_embeddings)
    tf.debugging.assert_near(label_embeddings, expected_label_embeddings)
    tf.debugging.assert_near(similarities, expected_similarities)

  def test_get_embeddings_and_similarities_similarity(self):
    # expect_embeddings=False: y_pred is already a similarity matrix, so it
    # is passed through and the embedding outputs are None.
    y_pred = tf.constant(
        [[0.7071067, 0.0, 1.0, 1.0],
         [1.0, 0.7071067, 0.7071067, 0.7071067]]
    )
    y_true = tf.constant([[2], [3]])
    context_embeddings, label_embeddings, similarities = (
        utils.get_embeddings_and_similarities(
            y_pred, y_true, expect_embeddings=False))
    expected_similarities = tf.constant(
        [[0.7071067, 0.0, 1.0, 1.0],
         [1.0, 0.7071067, 0.7071067, 0.7071067]]
    )
    self.assertIsNone(context_embeddings)
    self.assertIsNone(label_embeddings)
    tf.debugging.assert_near(similarities, expected_similarities)

  def test_similarities(self):
    # Default Similarities layer: rows are normalized first, so the output
    # is the full cosine-similarity matrix between the two embedding sets.
    similarities_layer = utils.Similarities()
    context_embedding = tf.constant(
        [[1, 2, 3],
         [4.0, 5.0, 6.0],
         [1, 1, 1],
         [1, 1, 1],
         [1, 1, 2]])
    label_embedding = tf.constant(
        [[1, 2, 3],
         [4.0, 5.0, 6.0],
         [1, 1, 1],
         [1, 1, 1],
         [-1, -2, -3]])
    similarities = similarities_layer([context_embedding, label_embedding])
    expected_similarities = tf.constant(
        [[0.9999999, 0.97463185, 0.92582005, 0.92582005, -0.9999999],
         [0.97463185, 1.0000001, 0.98692757, 0.98692757, -0.97463185],
         [0.92582005, 0.98692757, 0.99999994, 0.99999994, -0.92582005],
         [0.92582005, 0.98692757, 0.99999994, 0.99999994, -0.92582005],
         [0.98198044, 0.9770084, 0.942809, 0.942809, -0.98198044]])
    tf.debugging.assert_near(expected_similarities, similarities)
    # Also make sure layer.call works.
    similarities = similarities_layer.call([context_embedding, label_embedding])
    tf.debugging.assert_near(expected_similarities, similarities)

  def test_similarities_cosine_similarity(self):
    # normalization_fn=None: the layer returns raw dot products instead.
    similarities_layer = utils.Similarities(normalization_fn=None)
    context_embedding = tf.constant(
        [[1, 2, 3],
         [4.0, 5.0, 6.0],
         [1, 1, 1],
         [1, 1, 1],
         [1, 1, 2]])
    label_embedding = tf.constant(
        [[1, 2, 3],
         [4.0, 5.0, 6.0],
         [1, 1, 1],
         [1, 1, 1],
         [-1, -2, -3]])
    similarities = similarities_layer([context_embedding, label_embedding])
    expected_similarities = tf.constant(
        [[14.0, 32, 6, 6, -14],
         [32, 77, 15, 15, -32],
         [6, 15, 3, 3, -6],
         [6, 15, 3, 3, -6],
         [9, 21, 4, 4, -9]])
    tf.debugging.assert_near(expected_similarities, similarities)
    # Also make sure layer.call works.
    similarities = similarities_layer.call([context_embedding, label_embedding])
    tf.debugging.assert_near(expected_similarities, similarities)
# Run the absl test runner when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
| 30.236842
| 80
| 0.6085
| 920
| 6,894
| 4.367391
| 0.148913
| 0.031359
| 0.032852
| 0.02887
| 0.776008
| 0.730463
| 0.724241
| 0.722748
| 0.722748
| 0.722748
| 0
| 0.132299
| 0.252248
| 6,894
| 227
| 81
| 30.370044
| 0.647139
| 0.089353
| 0
| 0.740113
| 0
| 0
| 0.001278
| 0
| 0
| 0
| 0
| 0
| 0.096045
| 1
| 0.039548
| false
| 0
| 0.016949
| 0
| 0.062147
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab77eca2be9c4bf1e57da43a77d73a3d3c1d5cc0
| 16,354
|
py
|
Python
|
circlecitycon-2021/baby-meadows/decrypt.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 9
|
2021-04-20T15:28:36.000Z
|
2022-03-08T19:53:48.000Z
|
circlecitycon-2021/baby-meadows/decrypt.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | null | null | null |
circlecitycon-2021/baby-meadows/decrypt.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 6
|
2021-06-24T03:25:21.000Z
|
2022-02-20T21:44:52.000Z
|
#!/usr/bin/env python3
import random
g = 2
p = 27364195027981999497713610818487324581721539250673346091482772282510011564291025136146660508795219128557701709138115267357713678480331088419744185203212905091957459339614224615778653860885782170033046504718076905119565522298014609547550378271686461734952043412349624686487216938013960045889798734400260364799100295448466592570911013504023215177896840800637603090083812014709824090261242248660158815056130074711884192533210954160640493418715631256186934347998434364604226848641806710955394434610063365365212141903932895031496716774342948538753169437093579587734530974202008110362571844942019259309141601530779113813093
ciphers = [18472861299310583021601399426562802039334442333653746143102913331632383236848137320685468674862539761853274346196377554144942848084441247264818407508046510439693950513152677271186731608218189999358692054624552932891866196743356084561395540854602777576940052850919972795701934484909277412088653318885893899490159918107843246856106075504950601976210117269067729260482465787454246220955148086001963537465796103243325975899282158583991978247605905533983851241321447545603789830756562668142428937792075172380546159574910423211294907285460547701379828625118042163769858173941236535012239057740669898042738084693883104085250, 24310152058130818637878789807698251906398343312195171281318431074246396307309270397607757432293875184357985953334715985129402036749046209606572767747534081024255692940749633756195077707697849846235635375709703195099354356896313808032544382392557776850969229431978294554811756239776458355874846602320973644566566092512693751652987261977134333607770666883552714829168923492941006709034405516486825702859135296199112422298020103679530488055278989440730567703520620583693239451687982336218345043427663854377655979441400577368099125719143522814135527612289686333840100783393628972511565030468563168409014343339656620789278, 8691279924066505242343771695454852326215242047408674973932208943326590018668780613415478494481693242542485456665549964536869313681642289207502833457191507964758268445988321426034341797724950749700168097045652107475980956170810838658542682255417191425912947817951268564727844634599675621065992782255878707377916617903397013971728059095081119407410031693408061305811739240487285448215742861910897248654139859274288105433317340820784279482003904661667827410358151728194956893534391240041923455764567582557562094577998060900314385280403381066971033768701041910991434367578687028387045677149272828383702030213654460574309, 
5273423738165825346590448353850282599324610169138114467581605008473622024266018431941959707799183544183272978032677513172059316507204696248551889132240934336296069771855183425019809748386931634349311751073822662282702003914855805904775641203523125425884065492163653929889458067158669003649453078283211857553346246958526478773578844732198986989173999353253161415299390738841709588691268258555939328360897036897782163098987533753949947470680256581565370903369825108264268135245598044123601620653822897593703158622385318271655756981912238323644425140230889566217269551580662966793496308288916532188322113805182704979826, 20483361832609799263391740017396954437800061304642734328886008991564118267872538472995473949713556584053607665341673233675744282182283219701392975161649510365983190494730934007228411926014226952157645434134718000697092875695010393329143609151265568858931082756397637121454437633016314329735261996037292753645399804193087819658729828679941981541571766035773332800028676053094123996519031490180887183317124976370853997414838171919144579630174198011868528838364260386872184371478453630150883145331526782912531004861575887320342254288036421878212679377792720167155342575771379796029817375676897383673103442982600296359260, 204586843247940495988173590228442818039145913206194972854733627052747239357961462942783055709077751097896523267446680092384749077655460914565541296788957310866200131676030819486217635508423004341887065024322785580864464333908069086555423435267273134629343061169653808947093158195686127212136762474050820420052032984568087459787952020480350040986262029235513702568027737463045551915467015741278411379770324075398404912532876538571694629390436172791487668256951398238660268292311910350921611015972514381709608761173431769106064116028011147535563701459861710235425432733800577590360508127890743589348830963174240555336, 
22333656925546931057903453694474633400335512519635280497032234199213986062853665351862277193715410931747342358324831791466144267788075967576920966707849440227605623999477208142198984029366350651528083060241915110223643514669041993137633322518752136753715622491798457798217327005515873707606743647920665433580861467326160339933052004963705977069609109090617230611974112476900424820033668577752959573764630796412316904140544030520471009167343802930455342019413234520363062921625245355390967952722156096843894074192470275949984197623864869233092429066560061140876088754736594413984925729888115033739464285557903617536544, 14639254649193709755005625809079793057650080600318428534638244203436584638498073415243283611954711483405732301738972142185007516946428158052597667014216297514863842431644475042158449379081802002140104645742031518539764115664888695421384444693219014651186712223193925463775746029247130467410333411384134863292601245288670296034426848828917939939361441762712476174912541196971530628395929417542646366280559628310990341848345067324244754147803631459777524927868305917995161774283099310254224936321211751074998771344594255176443358305725424203911997490742189405298479694323154976785578433668847402902498613369044166357510, 1091027895410113732168387600230947476997142404521422076528209450905011160693451044839634819221754362343356834139616072489907233884339771001149455677738352368780191034258799245939650902295109324906721403526254846350766003216963922271821563871825557233365378972776979955980523456162289086363757144172014151957172312957079869951853716662497099702399024976790278022015775177455242446273813214482967733495124082047931429465865495296045782179318466303923000813280967680250738528653747685922476103776965331518047500501731552236631883322871793824554782013529732307851968328148982616606367180861216610441199313308303281347387, 
13433393706831736860981188841860197277441941423408080090377736577324367531969889112819812021424784224188764056387543111801189947469461496216661435575006730446420984011301481545260794134322126537107501638522742930537138953804684059284204316142826049894385633802024685580198254331032327726403157925413779891755092424324511228840072396390276946499941585703901336244870709830055393584225868013951876648090407307820690909405786497460853028152710868389714064971913973094377451969716565235552115759645309359794268624793378660542479460296620208902815141143321106368407759341255795026861876567591384902898636092859954314778769, 16450936301756044013727486109646931401089693771822854164100416503784076143537766255358090760738523338816878198233654117583621640473535386759003037615862460472233887185948424967036069947648080179065461511779538387315062069977334403772105267723917135070169280086309949209914346580793606089811487229237522979739471637204270749538049484610057831443464885149543843378657304835942008980927728289749524311736204455356113444027542475641025404535080690640504306573241862104174532566173789822417426705982629588270940252530988792809911805275918200819694330141600823253302763697543015002100422823116603595125569159892865107742881, 20587215887303401222670187778524562488483441772395747414204452477321397652669233987943689317319076365334533067207716726991337576761253847243532283136087589290399644150206927808491332078359066979645898607207052229928751832265321754151483234372437632838336869566345125777341387210035330763198017587342180718653077007577368803975366010600765534857045073186317927003239717400249028204182390614308227976137313855502986401330830831720335137051619088601485301065051586430225593484846239657105727884559712393007657864041748454950143939062090873645573945838095872578637809146113102433239439010425451043631031122410518016532326, 
18718078422282491772104012908257499806043920201150754341887889023883234824474936600240547644658329104097741397700215150394259550098043718727247865969486801594683206023042741018353958772382849699463971022088528732972690842891958420790289963469748804360154460851257719052263877373717968942321680530100665894814188433863338628598900654685563494242024130441276997001185792201669135747171401967908235837682987051164362394538409222730317148096383845275516981448025911710923226437691852616597937843686726261674161942570951447657328963332411798992704237510771438627188753081609738820884083006901282470019703070895516844818458, 19869238518860953321449271146150124926641764340901399106962716479902573490843628950675599394674440269192242274536375839894127140832422241759200214937688689751023067256170691175761180852448394407746530817353567963890007059775300919731599861722697907628658413689059361855017050291899019544052933603494806949837229893329835579164411519489332264082284531664995005654763382416093551924223486224588663126754000051536397088561287455139441483811731570270672738031763275135412303341805713711899215539374826949020693298350983779865757857672884156562501191245819314921340707647343042496195079107113686344266524558956775977327101, 14831731019027051626807616692745163041327293068623546255754213721188100354477621581933447307183110943102206559877127215788690868444133957230154954372522968657804878765791019643176367081708858747469032491252409588355671135321332371837455951260576400225780704373640887707888027016375358346206229200495336185083312189712154759717306600427880936602780038564283694928584991642811455058636576662423372864897944597955457958039809170249117330074985465196366200640773569297423665613658836388873447574011621506329626378803657051197530919172313529061806670618427752473982299356987486258347225994181866569963565402401687480977724, 
20738315498943570152622063981633144181751660311421605081938406537883552428484341241598017877822737792617959314487615480239701249413760744197797352976580385101972818616048574124698876398498696426526489738605160629488494573905202140753426260635544799766026020693951542006255863689673528573965479320300750988427659722210796539486456452371199241561102152957653425833422445540569094710988487077630480017960285026579254731352442569712303291702087516565868769160225460183390722632395778106209504437877812841526046060307806520441539907707033141662464874429178921829929190745719853145631373706869482449804926180944569137112180, 2223099422805920453849254695898183407453110180971516940491203476236352587967778637680635191340988566956379299988872555840732836150340511395511261716071559488012787300463543132099333444563307258462642690264023598387084064634993510605386825116875767048088750264676027950957039105516929077103839208291545462671169257450090899296931687077928268596418832478166695453988667899446181599340868599849140870766897001537600680191455274327967480002172300767431312742047011122845763511806940333124800711667596764570537682037602039275968919248595379421836639163628614551743282415454744659812185684625053031939895057231225552590737, 14616210186216324748927544607210065459879066607522353998242531910343696610786921577420825442104244223555226015501691492275954598755751520683422684791522936061154635971576278201508286077316206852596356483769606843156679206779119643681774423131883217690272314312673888803060234273854690723154039886423225335247144879980548508514145547890597891724234197128348472166327875920534940978385895069299030891505743546227609334818258480861589205179729486709150993819879915739513329141070712512268431908833041270028240492828295785590583491875553424679506392336623476254367938782435226286899370686467001205727736453119928203691200, 
10252602195674039890686696716447658953114808279946084507649598547131692199440677179222165032891210140401269298539083681691029841906840533012311905664041512533315535834073489735556219557249068083022499850219747995305293582059709338454284229591686887044394989276916072315395504528952332881744739128842660231067801431852338924466416398930082929929635954666344143663963779025500328668000068828834176810511977842349350201076264179830056263128319156087103843555688707704053676693596383200443460087986182875085547968328976596724108556789119133705276684328156818568411910852131081492484388133190324842190204538815918789533934, 14669307249923474929340038199420761032269350124028367558830277610589374426635340755118041090621487239039373388458685030345273257392933571358463418790921643214947250307992537217405640887423227682218423171541689749165343414018254360683342593102488347398748010409055529591343004121222046883466833209783740511911579407233413640876366939275292780561927572371831689782234931176931114650562771576795725539085629807406184978450812107206148288114396432979052450652792204845577646221729253528899913838602577401839888226161198819420493065606578109814621430815551567437592105981311280591528434732873219017117109256612796704667658, 3345098013983717638336056338668086861066125408556120978544252893013011464499053088619197866298014616202594192588751004608896788323715009244436172385292477744168360903520915884859788723981179282829577132047049403557515781074169663574909325543252797743444386097548180096591546561942876985661131063049928061281752913865752053020459603498442085886063679685412892235320128298086409604753473392671174560744943165346339395207271320778469801017862923377055977271911608592072724648250105922410675902936978056083275628873829091600797026091911074427487313517988208702111026615498366929090983918208594822054909179003799234254888, 
12166131059815975838811768833960297236694527843453817872944059719731318716936819402988951850268987424246584945226058960087848761437322613432532014097126278441563937874537350985340423848214426453909145716195544789965693975560462777940266283184155606135778986088391711542503376428634783171818517088462870251359712241956199953176541914909949163445762073773937963413629045852655756732520051803888314777948234843872654547521200158759801533575835785578251029418387475475387866925322721649393767315949137920775421372347708777321543228811700789247282604990699872376041689074104069014773938773963279597279938809623697199679084, 14834583907015519795462768361779410548224689112118769203381257906298090331353651205043473142156814996905058288027655975260620162684494098252455949112059887500278786827698149783082818094120293189805605741899072740527962414369726451844226180089912617711898819245872363504133410611994907465804180733326116794268098109528491320304641767551168533969040495519957225439882439373731650387045895701256869947811444172324834605687627192005062088187288087449649767463302146305934507270237835595725298639551071461776893998209781680610482139183268745429264018471990863629000264462019532085090856885406626096874129564686904484473004, 20207192224991205559957977686858345880004555314495278772480801596886119777382132455789565607182006704115793799967917756971126626327405081183211513731712103078265574784904173454811312905884886883946190747646755827945859767314681235462573498419627285628872636809891042094168408289355705558697641615994523988679267392120144848846896263052581893149143816933382572261071577981483756697002630410878871322228411667387332059805546290766983165787882031275830542633749985033865720801275023957851310587209753993574052342670819892875230518815852424617092228052297340045966895774747260617496919162083100788053879416349721325485892, 
20751160439979842255652097809499285200251991412477076489152855717079572748948927570289942045730282203573205674815954444323740133547683712238256772973165241549090504168278296524417790568661947627484033635689159411280391088797261328442029577562671544269438512603607354884878965022005397705781623747274914073316269756564423637090609458599368308738867256773066113453949336341646985985072494007166427167626273944332441055543942445343787561605317471675496529851570942219444230005414173788862832992587299531264185197690068971210276036031608714972569916772582721979547968529968758660275170246580335435366286642450273211339858]
# Seeding with the fixed constant reproduces the exact sequence of random
# exponents — presumably the same seed the encryptor used, so each
# per-character key can be regenerated in order (TODO confirm against the
# encrypt script).
random.seed(0x1337)
flag = ''
for c in ciphers:
    # Recreate this character's ephemeral key r = g^k mod p; the call to
    # random.randrange must happen once per cipher to keep the PRNG stream
    # aligned with encryption.
    r = pow(g, random.randrange(2, p-1), p)
    # Brute-force the printable ASCII byte a satisfying a * r == c (mod p).
    # If no byte matches, the character is silently skipped.
    for a in range(33, 127):
        if a * r % p == c:
            flag += chr(a)
            break
print(flag)
| 817.7
| 15,478
| 0.988994
| 70
| 16,354
| 231.057143
| 0.771429
| 0.000247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.988239
| 0.006971
| 16,354
| 19
| 15,479
| 860.736842
| 0.007697
| 0.001284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000367
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
abe49a269500ab3759590ff9366ea754fe348207
| 18,594
|
py
|
Python
|
tensorflow_graphics/projects/gan/losses_test.py
|
Liang813/graphics
|
71ab1775228a0a292427551350cbb62bfa8bd01a
|
[
"Apache-2.0"
] | 2,759
|
2019-01-08T10:40:34.000Z
|
2022-03-28T13:49:37.000Z
|
tensorflow_graphics/projects/gan/losses_test.py
|
Liang813/graphics
|
71ab1775228a0a292427551350cbb62bfa8bd01a
|
[
"Apache-2.0"
] | 262
|
2019-04-28T12:25:49.000Z
|
2022-03-24T19:35:15.000Z
|
tensorflow_graphics/projects/gan/losses_test.py
|
Liang813/graphics
|
71ab1775228a0a292427551350cbb62bfa8bd01a
|
[
"Apache-2.0"
] | 380
|
2019-05-09T00:14:45.000Z
|
2022-03-31T12:48:25.000Z
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gan.losses."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.projects.gan import losses
from tensorflow_graphics.util import test_case
class LossesTest(test_case.TestCase):
  """Tests for the GAN loss functions in gan.losses.

  Fixes applied in review: misspelled local identifiers
  (`r1_regulatiztion` -> `r1_regularization`), the typo'd subtest label
  `'with_respcet_to_generated'`, and comment typos ("pentaly").
  """

  def test_gradient_penalty_shape_correct(self):
    """Gradient penalty returns one value per batch element."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Reshape((25,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = tf.ones(shape=(3, 5, 5))
    generated_data = tf.ones(shape=(3, 5, 5))

    gradient_penalty = losses.gradient_penalty_loss(
        real_data=real_data,
        generated_data=generated_data,
        discriminator=discriminator)

    self.assertAllEqual(tf.shape(gradient_penalty), (3,))

  def test_gradient_penalty_shape_correct_sequence_input(self):
    """Gradient penalty supports sequences of tensors as input."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Concatenate())
    discriminator.add(tf.keras.layers.Reshape((50,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = (tf.ones(shape=(3, 5, 5)), tf.ones(shape=(3, 5, 5)))
    generated_data = (tf.ones(shape=(3, 5, 5)), tf.ones(shape=(3, 5, 5)))

    gradient_penalty = losses.gradient_penalty_loss(
        real_data=real_data,
        generated_data=generated_data,
        discriminator=discriminator)

    self.assertAllEqual(tf.shape(gradient_penalty), (3,))

  def test_gradient_penalty_loss_positive(self):
    """Gradient penalty is non-negative."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Reshape((25,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = tf.ones(shape=(1, 5, 5))
    generated_data = tf.ones(shape=(1, 5, 5))

    gradient_penalty = losses.gradient_penalty_loss(
        real_data=real_data,
        generated_data=generated_data,
        discriminator=discriminator)

    self.assertAllGreaterEqual(gradient_penalty, 0.0)

  def test_gradient_penalty_loss_positive_for_sequence_input(self):
    """Gradient penalty is non-negative for sequence inputs."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Concatenate())
    discriminator.add(tf.keras.layers.Reshape((50,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = (tf.ones(shape=(1, 5, 5)), tf.ones(shape=(1, 5, 5)))
    generated_data = (tf.ones(shape=(1, 5, 5)), tf.ones(shape=(1, 5, 5)))

    gradient_penalty = losses.gradient_penalty_loss(
        real_data=real_data,
        generated_data=generated_data,
        discriminator=discriminator)

    self.assertAllGreaterEqual(gradient_penalty, 0.0)

  def test_gradient_penalty_loss_jacobian_preset(self):
    """Jacobian of the gradient penalty w.r.t. weights is correct/finite."""
    layer_weights = np.zeros(shape=(25, 1), dtype=np.float32)
    real_data = np.ones(shape=(1, 5, 5), dtype=np.float32)
    generated_data = np.ones(shape=(1, 5, 5), dtype=np.float32)

    def gradient_penalty_fn(weights):

      def multiply(input_tensor):
        return tf.linalg.matmul(input_tensor, weights)

      discriminator = tf.keras.Sequential()
      discriminator.add(tf.keras.layers.Reshape((25,)))
      # To simulate a dense layer a lambda layer is used, such that we are able
      # to feed the weights in as numpy array to the assert_jacobian_fn.
      discriminator.add(tf.keras.layers.Lambda(multiply))
      return losses.gradient_penalty_loss(
          real_data=tf.convert_to_tensor(real_data),
          generated_data=tf.convert_to_tensor(generated_data),
          discriminator=discriminator)

    with self.subTest(name='is_correct'):
      self.assert_jacobian_is_correct_fn(gradient_penalty_fn, (layer_weights,))
    with self.subTest(name='is_finite'):
      self.assert_jacobian_is_finite_fn(gradient_penalty_fn, (layer_weights,))

  def test_gradient_penalty_loss_sequence_input_jacobian_preset(self):
    """Jacobian checks for the gradient penalty with sequence inputs."""
    layer_weights = np.zeros(shape=(50, 1), dtype=np.float32)
    real_data = (tf.ones(shape=(1, 5, 5), dtype=tf.float32),
                 tf.ones(shape=(1, 5, 5), dtype=tf.float32))
    generated_data = (tf.ones(shape=(1, 5, 5), dtype=tf.float32),
                      tf.ones(shape=(1, 5, 5), dtype=tf.float32))

    def gradient_penalty_fn(weights):

      def multiply(input_tensor):
        return tf.linalg.matmul(input_tensor, weights)

      discriminator = tf.keras.Sequential()
      discriminator.add(tf.keras.layers.Concatenate())
      discriminator.add(tf.keras.layers.Reshape((50,)))
      # To simulate a dense layer a lambda layer is used, such that we are able
      # to feed the weights in as numpy array to the assert_jacobian_fn.
      discriminator.add(tf.keras.layers.Lambda(multiply))
      return losses.gradient_penalty_loss(
          real_data=real_data,
          generated_data=generated_data,
          discriminator=discriminator)

    with self.subTest(name='is_correct'):
      self.assert_jacobian_is_correct_fn(gradient_penalty_fn, (layer_weights,))
    with self.subTest(name='is_finite'):
      self.assert_jacobian_is_finite_fn(gradient_penalty_fn, (layer_weights,))

  def test_gradient_penalty_loss_lambda_for_zero_gradient(self):
    """A zero-gradient discriminator yields a penalty equal to the weight."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Reshape((4,)))
    # Generates a dense layer that is initialized with all zeros.
    # This leads to a network that has zero gradient for any input.
    discriminator.add(
        tf.keras.layers.Dense(
            units=1, kernel_initializer='zeros', bias_initializer='zeros'))
    real_data = tf.ones(shape=(1, 2, 2))
    generated_data = tf.ones(shape=(1, 2, 2))
    weight = 1.0

    gradient_penalty = losses.gradient_penalty_loss(
        real_data=real_data,
        generated_data=generated_data,
        discriminator=discriminator,
        weight=weight)

    # Tolerance is large due to eps that is added in the gradient penalty loss
    # for numerical stability at 0.
    self.assertAllClose(gradient_penalty, (weight,), atol=0.001)

  def test_gradient_penalty_loss_lambda_for_zero_gradient_sequence_input(self):
    """Zero-gradient discriminator penalty equals the weight (sequences)."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Concatenate())
    discriminator.add(tf.keras.layers.Reshape((8,)))
    # Generates a dense layer that is initialized with all zeros.
    # This leads to a network that has zero gradient for any input.
    discriminator.add(
        tf.keras.layers.Dense(
            units=1, kernel_initializer='zeros', bias_initializer='zeros'))
    real_data = [tf.ones(shape=(1, 2, 2)), tf.ones(shape=(1, 2, 2))]
    generated_data = [tf.ones(shape=(1, 2, 2)), tf.ones(shape=(1, 2, 2))]
    weight = 1.0

    gradient_penalty = losses.gradient_penalty_loss(
        real_data=real_data,
        generated_data=generated_data,
        discriminator=discriminator,
        weight=weight)

    # Tolerance is large due to eps that is added in the gradient penalty loss
    # for numerical stability at 0.
    self.assertAllClose(gradient_penalty, (weight,), atol=0.001)

  def test_gradient_penalty_loss_with_wrong_input_types_raises(self):
    """Mixing a tensor with a sequence of tensors raises TypeError."""
    discriminator = tf.keras.Sequential()
    with self.assertRaisesRegex(
        TypeError, 'should either both be a tf.Tensor '
        'or both a sequence of tf.Tensor'):
      losses.gradient_penalty_loss(
          real_data=(tf.ones((1,)),),
          generated_data=tf.ones((1,)),
          discriminator=discriminator)

  def test_gradient_penalty_loss_with_unequal_number_of_elements_raises(self):
    """Sequences of different lengths raise ValueError."""
    discriminator = tf.keras.Sequential()
    with self.assertRaisesRegex(
        ValueError, 'number of elements in real_data and generated_data are '
        'expected to be equal'):
      losses.gradient_penalty_loss(
          real_data=(tf.ones((1,)),),
          generated_data=(tf.ones((1,)), tf.ones((1,))),
          discriminator=discriminator)

  def test_r1_regularization_shape_correct(self):
    """R1 regularization returns one value per batch element."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Reshape((25,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = tf.ones(shape=(3, 5, 5))

    r1_regularization = losses.r1_regularization(
        real_data=real_data, discriminator=discriminator)
    r1_regularization_value = self.evaluate(r1_regularization)

    self.assertSequenceEqual(r1_regularization_value.shape, (3,))

  def test_r1_regularization_shape_correct_sequence_input(self):
    """R1 regularization supports sequences of tensors as input."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Concatenate())
    discriminator.add(tf.keras.layers.Reshape((50,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = (tf.ones(shape=(3, 5, 5)), tf.ones(shape=(3, 5, 5)))

    r1_regularization = losses.r1_regularization(
        real_data=real_data, discriminator=discriminator)
    r1_regularization_value = self.evaluate(r1_regularization)

    self.assertSequenceEqual(r1_regularization_value.shape, (3,))

  def test_r1_regularization_positive(self):
    """R1 regularization is non-negative."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Reshape((25,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = tf.ones(shape=(1, 5, 5))

    r1_regularization = losses.r1_regularization(
        real_data=real_data, discriminator=discriminator)

    self.assertAllGreaterEqual(r1_regularization, 0.0)

  def test_r1_regularization_positive_for_sequence_input(self):
    """R1 regularization is non-negative for sequence inputs."""
    discriminator = tf.keras.Sequential()
    discriminator.add(tf.keras.layers.Concatenate())
    discriminator.add(tf.keras.layers.Reshape((50,)))
    discriminator.add(tf.keras.layers.Dense(units=1))
    real_data = (tf.ones(shape=(1, 5, 5)), tf.ones(shape=(1, 5, 5)))

    r1_regularization = losses.r1_regularization(
        real_data=real_data, discriminator=discriminator)

    self.assertAllGreaterEqual(r1_regularization, 0.0)

  def test_r1_regularization_jacobian_random(self):
    """Jacobian of R1 regularization w.r.t. weights is correct/finite."""
    layer_weights = np.random.uniform(-1, 1, size=(25, 1)).astype(np.float32)
    real_data = np.ones(shape=(1, 5, 5), dtype=np.float32)

    def r1_regularization_fn(weights):

      def multiply(input_tensor):
        return tf.linalg.matmul(input_tensor, weights)

      discriminator = tf.keras.Sequential()
      discriminator.add(tf.keras.layers.Reshape((25,)))
      # To simulate a dense layer a lambda layer is used, such that we are able
      # to feed the weights in as numpy array to the assert_jacobian_fn.
      discriminator.add(tf.keras.layers.Lambda(multiply))
      return losses.r1_regularization(
          real_data=tf.convert_to_tensor(real_data),
          discriminator=discriminator)

    with self.subTest(name='is_correct'):
      self.assert_jacobian_is_correct_fn(
          r1_regularization_fn, (layer_weights,), delta=0.001, atol=0.01)
    with self.subTest(name='is_finite'):
      self.assert_jacobian_is_finite_fn(r1_regularization_fn, (layer_weights,))

  def test_r1_regulatization_sequence_input_jacobian_random(self):
    """Jacobian checks for R1 regularization with sequence inputs."""
    layer_weights = np.random.uniform(-1, 1, size=(50, 1)).astype(np.float32)
    real_data = (tf.ones(shape=(1, 5, 5), dtype=tf.float32),
                 tf.ones(shape=(1, 5, 5), dtype=tf.float32))

    def r1_regularization_fn(weights):

      def multiply(input_tensor):
        return tf.linalg.matmul(input_tensor, weights)

      discriminator = tf.keras.Sequential()
      discriminator.add(tf.keras.layers.Concatenate())
      discriminator.add(tf.keras.layers.Reshape((50,)))
      # To simulate a dense layer a lambda layer is used, such that we are able
      # to feed the weights in as numpy array to the assert_jacobian_fn.
      discriminator.add(tf.keras.layers.Lambda(multiply))
      return losses.r1_regularization(
          real_data=real_data, discriminator=discriminator)

    with self.subTest(name='is_correct'):
      self.assert_jacobian_is_correct_fn(
          r1_regularization_fn, (layer_weights,), delta=0.001, atol=0.01)
    with self.subTest(name='is_finite'):
      self.assert_jacobian_is_finite_fn(r1_regularization_fn, (layer_weights,))

  def test_wasserstein_generator_loss_shape_correct(self):
    """Wasserstein generator loss preserves the input shape."""
    loss_input = tf.ones(shape=(2, 1))

    loss = self.evaluate(losses.wasserstein_generator_loss(loss_input))

    self.assertAllEqual(loss.shape, (2, 1))

  @parameterized.parameters((losses.wasserstein_generator_loss, 0.0),
                            (losses.wasserstein_hinge_generator_loss, 0.0),
                            (losses.minimax_generator_loss, 0.0))
  def test_generator_loss_jacobian_preset(self, loss_function,
                                          loss_input_value):
    """Jacobian of the generator losses is finite and correct."""
    loss_input_init = np.full(
        shape=(2, 3), fill_value=loss_input_value, dtype=np.float32)
    loss_input = tf.convert_to_tensor(value=loss_input_init)

    loss = loss_function(loss_input)

    with self.subTest(name='is_finite'):
      self.assert_jacobian_is_finite(loss_input, loss_input_init, loss)
    with self.subTest(name='is_correct'):
      self.assert_jacobian_is_correct(
          loss_input, loss_input_init, loss, delta=1e-4, atol=1e-3)

  def test_wasserstein_discriminator_loss_shape_correct(self):
    """Wasserstein discriminator loss preserves the input shape."""
    loss_input = tf.ones(shape=(2, 1))

    loss = self.evaluate(
        losses.wasserstein_discriminator_loss(loss_input, loss_input))

    self.assertAllEqual(loss.shape, (2, 1))

  def test_wasserstein_discriminator_loss_zero_with_same_input(self):
    """Equal real/generated scores yield a zero Wasserstein loss."""
    loss_input = tf.ones(shape=(5, 1))

    loss = self.evaluate(
        losses.wasserstein_discriminator_loss(loss_input, loss_input))

    self.assertAllClose(tf.reduce_sum(loss), 0.0)

  @parameterized.parameters(
      (losses.wasserstein_discriminator_loss, 0.0, 0.0),
      (losses.wasserstein_discriminator_loss, 0.5, 0.5),
      (losses.wasserstein_hinge_discriminator_loss, 1.0, -1.0),
      (losses.wasserstein_hinge_discriminator_loss, 0.0, 0.0),
      (losses.wasserstein_hinge_discriminator_loss, 2.0, -2.0),
      (losses.minimax_discriminator_loss, 0.0, 0.0))
  def test_discriminator_loss_jacobian_finite_preset(
      self, loss_function, discriminator_value_real,
      discriminator_value_generated):
    """Jacobian of the discriminator losses is finite for preset values."""
    discriminator_value_real_init = np.full(
        shape=(2, 4), fill_value=discriminator_value_real, dtype=np.float32)
    discriminator_value_generated_init = np.full(
        shape=(2, 4),
        fill_value=discriminator_value_generated,
        dtype=np.float32)
    discriminator_value_real = tf.convert_to_tensor(
        value=discriminator_value_real_init)
    discriminator_value_generated = tf.convert_to_tensor(
        value=discriminator_value_generated_init)

    loss = loss_function(discriminator_value_real,
                         discriminator_value_generated)

    with self.subTest(name='with_respect_to_real'):
      self.assert_jacobian_is_finite(discriminator_value_real,
                                     discriminator_value_real_init, loss)
    with self.subTest(name='with_respect_to_generated'):
      self.assert_jacobian_is_finite(discriminator_value_generated,
                                     discriminator_value_generated_init, loss)

  @parameterized.parameters(
      (losses.wasserstein_discriminator_loss, 0.0, 0.0),
      (losses.wasserstein_discriminator_loss, 0.5, 0.5),
      (losses.wasserstein_hinge_discriminator_loss, 0.0, 0.0),
      (losses.wasserstein_hinge_discriminator_loss, 2.0, -2.0),
      (losses.minimax_discriminator_loss, 0.0, 0.0))
  def test_discriminator_loss_jacobian_correct_preset(
      self, loss_function, discriminator_value_real,
      discriminator_value_generated):
    """Jacobian of the discriminator losses is correct for preset values."""
    discriminator_value_real_init = np.full(
        shape=(2, 4), fill_value=discriminator_value_real, dtype=np.float32)
    discriminator_value_generated_init = np.full(
        shape=(2, 4),
        fill_value=discriminator_value_generated,
        dtype=np.float32)
    discriminator_value_real = tf.convert_to_tensor(
        value=discriminator_value_real_init)
    discriminator_value_generated = tf.convert_to_tensor(
        value=discriminator_value_generated_init)

    loss = loss_function(discriminator_value_real,
                         discriminator_value_generated)

    with self.subTest(name='with_respect_to_real'):
      self.assert_jacobian_is_correct(
          discriminator_value_real,
          discriminator_value_real_init,
          loss,
          delta=1e-4,
          atol=1e-3)
    with self.subTest(name='with_respect_to_generated'):
      self.assert_jacobian_is_correct(
          discriminator_value_generated,
          discriminator_value_generated_init,
          loss,
          delta=1e-4,
          atol=1e-3)

  def test_wasserstein_hinge_generator_loss_shape_correct(self):
    """Hinge generator loss preserves the input shape."""
    loss_input = tf.ones(shape=(2, 1))

    loss = self.evaluate(losses.wasserstein_hinge_generator_loss(loss_input))

    self.assertAllEqual(loss.shape, (2, 1))

  def test_wasserstein_hinge_discriminator_loss_shape_correct(self):
    """Hinge discriminator loss preserves the input shape."""
    loss_input = tf.ones(shape=(2, 1))

    loss = self.evaluate(
        losses.wasserstein_hinge_discriminator_loss(loss_input, loss_input))

    self.assertAllEqual(loss.shape, (2, 1))

  @parameterized.parameters((1.0, 2.0, 3.0), (-1.0, 2.0, 5.0), (0.0, 2.0, 4.0),
                            (4.0, 3.0, 4.0), (-4.0, 3.0, 9.0), (4.0, -3.0, 0.0))
  def test_wasserstein_hinge_discriminator_loss_correct_value(
      self, real_data_input, generated_data_input, expected_loss_value):
    """Hinge discriminator loss matches hand-computed scalar values."""
    real_data_input = tf.fill(dims=(), value=real_data_input)
    generated_data_input = tf.fill(dims=(), value=generated_data_input)

    loss = self.evaluate(
        losses.wasserstein_hinge_discriminator_loss(real_data_input,
                                                    generated_data_input))

    self.assertAlmostEqual(loss, expected_loss_value)

  def test_minimax_generator_loss_shape_correct(self):
    """Minimax generator loss preserves the input shape."""
    loss_input = tf.ones(shape=(2, 1))

    loss = self.evaluate(losses.minimax_generator_loss(loss_input))

    self.assertAllEqual(loss.shape, (2, 1))

  def test_minimax_discriminator_loss_shape_correct(self):
    """Minimax discriminator loss preserves the input shape."""
    loss_input = tf.ones(shape=(2, 1))

    loss = self.evaluate(
        losses.minimax_discriminator_loss(loss_input, loss_input))

    self.assertAllEqual(loss.shape, (2, 1))
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 40.421739
| 80
| 0.710498
| 2,471
| 18,594
| 5.091056
| 0.085391
| 0.028378
| 0.032353
| 0.06399
| 0.877663
| 0.865342
| 0.831558
| 0.817091
| 0.783307
| 0.769634
| 0
| 0.026649
| 0.18065
| 18,594
| 459
| 81
| 40.509804
| 0.799081
| 0.084974
| 0
| 0.689759
| 0
| 0
| 0.020789
| 0.002945
| 0
| 0
| 0
| 0
| 0.10241
| 1
| 0.105422
| false
| 0
| 0.01506
| 0.012048
| 0.14759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f9f083da5c331ebd662c578d9ea692584c78527d
| 205
|
py
|
Python
|
backend/server/models/__init__.py
|
jessvb/convo
|
6b8a0d84142a0bfacf94482cebba42d92646be26
|
[
"MIT"
] | null | null | null |
backend/server/models/__init__.py
|
jessvb/convo
|
6b8a0d84142a0bfacf94482cebba42d92646be26
|
[
"MIT"
] | null | null | null |
backend/server/models/__init__.py
|
jessvb/convo
|
6b8a0d84142a0bfacf94482cebba42d92646be26
|
[
"MIT"
] | null | null | null |
from models.action import *
from models.procedure import *
from models.condition import *
from models.klass import *
from models.valueof import *
from models.execution import *
from models.intent import *
| 25.625
| 30
| 0.795122
| 28
| 205
| 5.821429
| 0.357143
| 0.429448
| 0.588957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136585
| 205
| 7
| 31
| 29.285714
| 0.920904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e61398111c64d5dca991ab1cfa876be9c0ab0071
| 31
|
py
|
Python
|
src/ctc/toolbox/lending_utils/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 94
|
2022-02-15T19:34:49.000Z
|
2022-03-26T19:26:22.000Z
|
src/ctc/toolbox/lending_utils/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-03-03T02:58:47.000Z
|
2022-03-11T18:41:05.000Z
|
src/ctc/toolbox/lending_utils/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-02-15T17:53:07.000Z
|
2022-03-17T19:14:17.000Z
|
from .lending_summary import *
| 15.5
| 30
| 0.806452
| 4
| 31
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e620c211257e1a0bab505dd365e8aa7113c83139
| 129
|
py
|
Python
|
restaurant/get_env.py
|
dasky92/django-restaurant
|
db645868fad1536f6316a78d89a570f374e8b771
|
[
"MIT"
] | null | null | null |
restaurant/get_env.py
|
dasky92/django-restaurant
|
db645868fad1536f6316a78d89a570f374e8b771
|
[
"MIT"
] | null | null | null |
restaurant/get_env.py
|
dasky92/django-restaurant
|
db645868fad1536f6316a78d89a570f374e8b771
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
def get_env():
    """
    Return the dotted path of the settings module to load.
    """
    settings_module = 'settings.environment.local'
    return settings_module
| 14.333333
| 39
| 0.612403
| 15
| 129
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24031
| 129
| 8
| 40
| 16.125
| 0.795918
| 0.387597
| 0
| 0
| 0
| 0
| 0.412698
| 0.412698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
e64d8444ce097e33b893d6e87ca1bc8dd6f6ae73
| 1,207
|
py
|
Python
|
apps/covid_19/preprocess/mixing_matrix/funcs.py
|
malanchak/AuTuMN
|
0cbd006d1f15da414d02eed44e48bb5c06f0802e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
apps/covid_19/preprocess/mixing_matrix/funcs.py
|
malanchak/AuTuMN
|
0cbd006d1f15da414d02eed44e48bb5c06f0802e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
apps/covid_19/preprocess/mixing_matrix/funcs.py
|
malanchak/AuTuMN
|
0cbd006d1f15da414d02eed44e48bb5c06f0802e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
Functions which can be used to transform dynamic mixing timeseries data
"""
from typing import List
def repeat_prev(prev_vals: List[float]):
    """
    Return the most recently seen value, unchanged
    """
    latest = prev_vals[-1]
    return latest
def add_to_prev(prev_vals: List[float], increment: float):
    """
    Shift the previous value by increment, saturating at zero
    """
    shifted = prev_vals[-1] + increment
    # Negative results are clamped to 0.
    return 0 if shifted < 0 else shifted
def add_to_prev_up_to_1(prev_vals: List[float], increment: float):
    """
    Shift the previous value by increment, saturating at one and at zero
    """
    shifted = prev_vals[-1] + increment
    # Clamp to the unit interval: above-one results become 1, negatives 0.
    if shifted > 1:
        return 1
    return 0 if shifted < 0 else shifted
def scale_prev(prev_vals: List[float], fraction: float):
    """
    Multiply the previous value by fraction, saturating at zero
    """
    scaled = prev_vals[-1] * fraction
    # Negative results are clamped to 0.
    return 0 if scaled < 0 else scaled
def scale_prev_up_to_1(prev_vals: List[float], fraction: float):
    """
    Multiply the previous value by fraction, saturating at one and at zero
    """
    scaled = prev_vals[-1] * fraction
    # Clamp to the unit interval: above-one results become 1, negatives 0.
    if scaled > 1:
        return 1
    return 0 if scaled < 0 else scaled
| 20.116667
| 71
| 0.601491
| 168
| 1,207
| 4.184524
| 0.27381
| 0.113798
| 0.085349
| 0.12091
| 0.779516
| 0.743954
| 0.743954
| 0.743954
| 0.655761
| 0.618777
| 0
| 0.022592
| 0.303231
| 1,207
| 59
| 72
| 20.457627
| 0.813317
| 0.240265
| 0
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.032258
| 0
| 0.548387
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
050b48e5c011416a01274bb4adb31656f83cc5e5
| 24
|
py
|
Python
|
aad2onnx/shape_calculators/__init__.py
|
matwey/aad2onnx
|
35f06c22abd433b10b13209ddca9e8eb80717d61
|
[
"MIT"
] | null | null | null |
aad2onnx/shape_calculators/__init__.py
|
matwey/aad2onnx
|
35f06c22abd433b10b13209ddca9e8eb80717d61
|
[
"MIT"
] | null | null | null |
aad2onnx/shape_calculators/__init__.py
|
matwey/aad2onnx
|
35f06c22abd433b10b13209ddca9e8eb80717d61
|
[
"MIT"
] | null | null | null |
from . import AadForest
| 12
| 23
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
054125394cc38442b03adbd057fa642ee6976d35
| 190
|
py
|
Python
|
backend/api/infrastcture/sample_router.py
|
ryutaro-0907/prome
|
efb8211e6d832e5b1c4b2dffd70b13b696baf45f
|
[
"MIT"
] | null | null | null |
backend/api/infrastcture/sample_router.py
|
ryutaro-0907/prome
|
efb8211e6d832e5b1c4b2dffd70b13b696baf45f
|
[
"MIT"
] | null | null | null |
backend/api/infrastcture/sample_router.py
|
ryutaro-0907/prome
|
efb8211e6d832e5b1c4b2dffd70b13b696baf45f
|
[
"MIT"
] | null | null | null |
from typing import Dict
from fastapi import APIRouter
# Router collecting the sample endpoints of this module.
router = APIRouter()


@router.get('/sample', tags=["message"])
def fetch_hello_world() -> Dict:
    """Return a static hello-world greeting payload."""
    payload = {"message": "Hello world!"}
    return payload
| 21.111111
| 40
| 0.705263
| 24
| 190
| 5.5
| 0.666667
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142105
| 190
| 8
| 41
| 23.75
| 0.809816
| 0
| 0
| 0
| 0
| 0
| 0.173684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
054aed34d02dc769ead032988c6482eafe60ea05
| 30,567
|
py
|
Python
|
sdk/python/pulumi_concourse/_inputs.py
|
brumhard/concourse-pulumi-provider
|
b94721a64245955b00049f5e2fc176b1178831b7
|
[
"Apache-2.0"
] | 1
|
2021-09-16T06:15:11.000Z
|
2021-09-16T06:15:11.000Z
|
sdk/python/pulumi_concourse/_inputs.py
|
brumhard/pulumi-concourse
|
b94721a64245955b00049f5e2fc176b1178831b7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_concourse/_inputs.py
|
brumhard/pulumi-concourse
|
b94721a64245955b00049f5e2fc176b1178831b7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Public API of this generated module: one *Args input class per pipeline
# concept exposed by the provider.
__all__ = [
    'AnonymousResourceArgs',
    'DisplayOptionsArgs',
    'GetStepArgs',
    'GroupArgs',
    'JobArgs',
    'ResourceTypeArgs',
    'ResourceArgs',
    'RunArgsArgs',
    'TaskConfigArgs',
    'TaskStepArgs',
]
@pulumi.input_type
class AnonymousResourceArgs:
    """Input arguments for an anonymous (inline) resource: a required `source`
    mapping and `type` string, plus an optional `params` mapping.
    """
    def __init__(__self__, *,
                 source: pulumi.Input[Mapping[str, pulumi.Input[str]]],
                 type: pulumi.Input[str],
                 params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        pulumi.set(__self__, "source", source)
        pulumi.set(__self__, "type", type)
        # Optional field is only stored when explicitly provided.
        if params is not None:
            pulumi.set(__self__, "params", params)
    @property
    @pulumi.getter
    def source(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
        pulumi.set(self, "source", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "params")
    @params.setter
    def params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "params", value)
@pulumi.input_type
class DisplayOptionsArgs:
    """Display options input: currently only an optional background image URL."""
    def __init__(__self__, *,
                 background_image: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] background_image: Allows users to specify a custom background image which is put at 30% opacity, grayscaled and blended into existing background. Must be an http, https, or relative URL.
        """
        if background_image is not None:
            pulumi.set(__self__, "background_image", background_image)
    @property
    @pulumi.getter
    def background_image(self) -> Optional[pulumi.Input[str]]:
        """
        Allows users to specify a custom background image which is put at 30% opacity, grayscaled and blended into existing background. Must be an http, https, or relative URL.
        """
        return pulumi.get(self, "background_image")
    @background_image.setter
    def background_image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "background_image", value)
@pulumi.input_type
class GetStepArgs:
    """Input arguments for a `get` step: a required `get` name plus optional
    `params`, `passed`, `resource`, and `trigger` fields.
    """
    def __init__(__self__, *,
                 get: pulumi.Input[str],
                 params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 passed: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 resource: Optional[pulumi.Input[str]] = None,
                 trigger: Optional[pulumi.Input[bool]] = None):
        pulumi.set(__self__, "get", get)
        # Optional fields are only stored when explicitly provided.
        if params is not None:
            pulumi.set(__self__, "params", params)
        if passed is not None:
            pulumi.set(__self__, "passed", passed)
        if resource is not None:
            pulumi.set(__self__, "resource", resource)
        if trigger is not None:
            pulumi.set(__self__, "trigger", trigger)
    @property
    @pulumi.getter
    def get(self) -> pulumi.Input[str]:
        return pulumi.get(self, "get")
    @get.setter
    def get(self, value: pulumi.Input[str]):
        pulumi.set(self, "get", value)
    @property
    @pulumi.getter
    def params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "params")
    @params.setter
    def params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "params", value)
    @property
    @pulumi.getter
    def passed(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "passed")
    @passed.setter
    def passed(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "passed", value)
    @property
    @pulumi.getter
    def resource(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource")
    @resource.setter
    def resource(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource", value)
    @property
    @pulumi.getter
    def trigger(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "trigger")
    @trigger.setter
    def trigger(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "trigger", value)
@pulumi.input_type
class GroupArgs:
    """Input arguments for a pipeline group: a required `name` and an optional
    list of `jobs` shown under that group.
    """
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 jobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] name: A unique name for the group. This should be short and simple as it will be used as the tab name for navigation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] jobs: A list of jobs that should appear in this group. A job may appear in multiple groups. Neighbours of jobs in the current group will also appear on the same page in order to give context of the location of the group in the pipeline. You may also use any valid glob to represent several jobs.
        """
        pulumi.set(__self__, "name", name)
        # `jobs` is optional; only store it when provided.
        if jobs is not None:
            pulumi.set(__self__, "jobs", jobs)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        A unique name for the group. This should be short and simple as it will be used as the tab name for navigation.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def jobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of jobs that should appear in this group. A job may appear in multiple groups. Neighbours of jobs in the current group will also appear on the same page in order to give context of the location of the group in the pipeline. You may also use any valid glob to represent several jobs.
        """
        return pulumi.get(self, "jobs")
    @jobs.setter
    def jobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "jobs", value)
@pulumi.input_type
class JobArgs:
    """Input arguments for a pipeline job: a required `name` and `plan`, plus
    optional lifecycle hooks (`ensure`, `on_*`) and settings (`max_in_flight`,
    `public`, `serial`).
    """
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 plan: pulumi.Input[Sequence[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]],
                 ensure: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]] = None,
                 max_in_flight: Optional[pulumi.Input[float]] = None,
                 on_abort: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]] = None,
                 on_error: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]] = None,
                 on_failure: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]] = None,
                 on_success: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]] = None,
                 public: Optional[pulumi.Input[bool]] = None,
                 serial: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] name: The name of the job. This should be short; it will show up in URLs.
        :param pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']] ensure: Step to execute regardless of whether the job succeeds, fails, errors, or aborts.
        :param pulumi.Input[float] max_in_flight: If set, specifies a maximum number of builds to run at a time. If serial or serial_groups are set, they take precedence and force this value to be 1.
        :param pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']] on_abort: Step to execute when the job aborts.
        :param pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']] on_error: Step to execute when the job errors.
        :param pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']] on_failure: Step to execute when the job fails.
        :param pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']] on_success: Step to execute when the job succeeds.
        :param pulumi.Input[bool] public: Default false. If set to true, the build log of this job will be viewable by unauthenticated users. Unauthenticated users will always be able to see the inputs, outputs, and build status history of a job. This is useful if you would like to expose your pipeline publicly without showing sensitive information in the build log.
        :param pulumi.Input[bool] serial: Default false. If set to true, builds will queue up and execute one-by-one, rather than executing in parallel.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "plan", plan)
        # Optional fields are only stored when explicitly provided.
        if ensure is not None:
            pulumi.set(__self__, "ensure", ensure)
        if max_in_flight is not None:
            pulumi.set(__self__, "max_in_flight", max_in_flight)
        if on_abort is not None:
            pulumi.set(__self__, "on_abort", on_abort)
        if on_error is not None:
            pulumi.set(__self__, "on_error", on_error)
        if on_failure is not None:
            pulumi.set(__self__, "on_failure", on_failure)
        if on_success is not None:
            pulumi.set(__self__, "on_success", on_success)
        if public is not None:
            pulumi.set(__self__, "public", public)
        if serial is not None:
            pulumi.set(__self__, "serial", serial)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the job. This should be short; it will show up in URLs.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def plan(self) -> pulumi.Input[Sequence[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]]:
        return pulumi.get(self, "plan")
    @plan.setter
    def plan(self, value: pulumi.Input[Sequence[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]]):
        pulumi.set(self, "plan", value)
    @property
    @pulumi.getter
    def ensure(self) -> Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]:
        """
        Step to execute regardless of whether the job succeeds, fails, errors, or aborts.
        """
        return pulumi.get(self, "ensure")
    @ensure.setter
    def ensure(self, value: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]):
        pulumi.set(self, "ensure", value)
    @property
    @pulumi.getter
    def max_in_flight(self) -> Optional[pulumi.Input[float]]:
        """
        If set, specifies a maximum number of builds to run at a time. If serial or serial_groups are set, they take precedence and force this value to be 1.
        """
        return pulumi.get(self, "max_in_flight")
    @max_in_flight.setter
    def max_in_flight(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_in_flight", value)
    @property
    @pulumi.getter
    def on_abort(self) -> Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]:
        """
        Step to execute when the job aborts.
        """
        return pulumi.get(self, "on_abort")
    @on_abort.setter
    def on_abort(self, value: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]):
        pulumi.set(self, "on_abort", value)
    @property
    @pulumi.getter
    def on_error(self) -> Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]:
        """
        Step to execute when the job errors.
        """
        return pulumi.get(self, "on_error")
    @on_error.setter
    def on_error(self, value: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]):
        pulumi.set(self, "on_error", value)
    @property
    @pulumi.getter
    def on_failure(self) -> Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]:
        """
        Step to execute when the job fails.
        """
        return pulumi.get(self, "on_failure")
    @on_failure.setter
    def on_failure(self, value: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]):
        pulumi.set(self, "on_failure", value)
    @property
    @pulumi.getter
    def on_success(self) -> Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]:
        """
        Step to execute when the job succeeds.
        """
        return pulumi.get(self, "on_success")
    @on_success.setter
    def on_success(self, value: Optional[pulumi.Input[Union['TaskStepArgs', 'GetStepArgs']]]):
        pulumi.set(self, "on_success", value)
    @property
    @pulumi.getter
    def public(self) -> Optional[pulumi.Input[bool]]:
        """
        Default false. If set to true, the build log of this job will be viewable by unauthenticated users. Unauthenticated users will always be able to see the inputs, outputs, and build status history of a job. This is useful if you would like to expose your pipeline publicly without showing sensitive information in the build log.
        """
        return pulumi.get(self, "public")
    @public.setter
    def public(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "public", value)
    @property
    @pulumi.getter
    def serial(self) -> Optional[pulumi.Input[bool]]:
        """
        Default false. If set to true, builds will queue up and execute one-by-one, rather than executing in parallel.
        """
        return pulumi.get(self, "serial")
    @serial.setter
    def serial(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "serial", value)
@pulumi.input_type
class ResourceTypeArgs:
    """Input arguments for a custom resource type; all fields are optional."""
    def __init__(__self__, *,
                 check_every: Optional[pulumi.Input[str]] = None,
                 defaults: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 privileged: Optional[pulumi.Input[bool]] = None,
                 source: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] check_every: Default 1m. The interval on which to check for new versions of the resource. Acceptable interval options are defined by the time.ParseDuration function. If set to never the resource will not be automatically checked. The resource can still be checked manually via the web UI, fly, or webhooks.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] defaults: The default configuration for the resource type. This varies by resource type, and is a black box to Concourse; it is merged with (duplicate fields are overwritten by) resource.source and passed to the resource at runtime.
        :param pulumi.Input[str] name: The name of the resource type. This should be short and simple. This name will be referenced by pipeline.resources defined within the same pipeline, and task.image_resources used by tasks running in the pipeline.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] params: Arbitrary config to pass when running the get to fetch the resource type's image.
        :param pulumi.Input[bool] privileged: Default false. If set to true, the resource's containers will be run with full capabilities, as determined by the worker backend the task runs on.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] source: The configuration for the resource. This varies by resource type, and is a black box to Concourse; it is blindly passed to the resource at runtime.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Default []. A list of tags to determine which workers the checks will be performed on. You'll want to specify this if the source is internal to a worker's network, for example.
        :param pulumi.Input[str] type: The resource type implementing the resource.
        """
        # Every field is optional; only store those explicitly provided.
        if check_every is not None:
            pulumi.set(__self__, "check_every", check_every)
        if defaults is not None:
            pulumi.set(__self__, "defaults", defaults)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if params is not None:
            pulumi.set(__self__, "params", params)
        if privileged is not None:
            pulumi.set(__self__, "privileged", privileged)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def check_every(self) -> Optional[pulumi.Input[str]]:
        """
        Default 1m. The interval on which to check for new versions of the resource. Acceptable interval options are defined by the time.ParseDuration function. If set to never the resource will not be automatically checked. The resource can still be checked manually via the web UI, fly, or webhooks.
        """
        return pulumi.get(self, "check_every")
    @check_every.setter
    def check_every(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "check_every", value)
    @property
    @pulumi.getter
    def defaults(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The default configuration for the resource type. This varies by resource type, and is a black box to Concourse; it is merged with (duplicate fields are overwritten by) resource.source and passed to the resource at runtime.
        """
        return pulumi.get(self, "defaults")
    @defaults.setter
    def defaults(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "defaults", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource type. This should be short and simple. This name will be referenced by pipeline.resources defined within the same pipeline, and task.image_resources used by tasks running in the pipeline.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Arbitrary config to pass when running the get to fetch the resource type's image.
        """
        return pulumi.get(self, "params")
    @params.setter
    def params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "params", value)
    @property
    @pulumi.getter
    def privileged(self) -> Optional[pulumi.Input[bool]]:
        """
        Default false. If set to true, the resource's containers will be run with full capabilities, as determined by the worker backend the task runs on.
        """
        return pulumi.get(self, "privileged")
    @privileged.setter
    def privileged(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "privileged", value)
    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The configuration for the resource. This varies by resource type, and is a black box to Concourse; it is blindly passed to the resource at runtime.
        """
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "source", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Default []. A list of tags to determine which workers the checks will be performed on. You'll want to specify this if the source is internal to a worker's network, for example.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The resource type implementing the resource.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ResourceArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
source: pulumi.Input[Mapping[str, pulumi.Input[str]]],
type: pulumi.Input[str],
check_every: Optional[pulumi.Input[str]] = None,
public: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
webhook_token: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the resource. This should be short and simple. This name will be referenced by build plans of jobs in the pipeline.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source: The configuration for the resource. This varies by resource type, and is a black box to Concourse; it is blindly passed to the resource at runtime.
:param pulumi.Input[str] type: The resource type implementing the resource.
:param pulumi.Input[str] check_every: Default 1m. The interval on which to check for new versions of the resource. Acceptable interval options are defined by the time.ParseDuration function. If set to never the resource will not be automatically checked. The resource can still be checked manually via the web UI, fly, or webhooks.
:param pulumi.Input[bool] public: Default false. If set to true, the metadata for each version of the resource will be viewable by unauthenticated users (assuming the pipeline has been exposed).
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Default []. A list of tags to determine which workers the checks will be performed on. You'll want to specify this if the source is internal to a worker's network, for example.
:param pulumi.Input[str] webhook_token: If specified, web hooks can be sent to trigger an immediate check of the resource, specifying this value as a primitive form of authentication via query params.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "source", source)
pulumi.set(__self__, "type", type)
if check_every is not None:
pulumi.set(__self__, "check_every", check_every)
if public is not None:
pulumi.set(__self__, "public", public)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if webhook_token is not None:
pulumi.set(__self__, "webhook_token", webhook_token)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the resource. This should be short and simple. This name will be referenced by build plans of jobs in the pipeline.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def source(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
"""
The configuration for the resource. This varies by resource type, and is a black box to Concourse; it is blindly passed to the resource at runtime.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The resource type implementing the resource.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def check_every(self) -> Optional[pulumi.Input[str]]:
"""
Default 1m. The interval on which to check for new versions of the resource. Acceptable interval options are defined by the time.ParseDuration function. If set to never the resource will not be automatically checked. The resource can still be checked manually via the web UI, fly, or webhooks.
"""
return pulumi.get(self, "check_every")
@check_every.setter
def check_every(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "check_every", value)
@property
@pulumi.getter
def public(self) -> Optional[pulumi.Input[bool]]:
"""
Default false. If set to true, the metadata for each version of the resource will be viewable by unauthenticated users (assuming the pipeline has been exposed).
"""
return pulumi.get(self, "public")
@public.setter
def public(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "public", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Default []. A list of tags to determine which workers the checks will be performed on. You'll want to specify this if the source is internal to a worker's network, for example.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the worker tags used when checking this resource."""
    pulumi.set(self, "tags", value)
@property
@pulumi.getter
def webhook_token(self) -> Optional[pulumi.Input[str]]:
    """
    If specified, web hooks can be sent to trigger an immediate check of the resource, specifying this value as a primitive form of authentication via query params.
    """
    return pulumi.get(self, "webhook_token")

@webhook_token.setter
def webhook_token(self, value: Optional[pulumi.Input[str]]):
    """Set the webhook authentication token."""
    pulumi.set(self, "webhook_token", value)
@pulumi.input_type
class RunArgsArgs:
    def __init__(__self__, *,
                 path: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 dir: Optional[pulumi.Input[str]] = None,
                 user: Optional[pulumi.Input[str]] = None):
        """Arguments describing the command a task runs.

        :param path: the executable to run (required).
        :param args: optional argument list for the executable.
        :param dir: optional working directory.
        :param user: optional user to run the command as.
        """
        pulumi.set(__self__, "path", path)
        # Optional fields are recorded only when explicitly supplied.
        for field, supplied in (("args", args), ("dir", dir), ("user", user)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """Path of the executable to run."""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Arguments passed to the executable, if any."""
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter
    def dir(self) -> Optional[pulumi.Input[str]]:
        """Working directory for the command, if any."""
        return pulumi.get(self, "dir")

    @dir.setter
    def dir(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dir", value)

    @property
    @pulumi.getter
    def user(self) -> Optional[pulumi.Input[str]]:
        """User the command runs as, if any."""
        return pulumi.get(self, "user")

    @user.setter
    def user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user", value)
@pulumi.input_type
class TaskConfigArgs:
    def __init__(__self__, *,
                 image_resource: pulumi.Input['AnonymousResourceArgs'],
                 platform: pulumi.Input[str],
                 run: pulumi.Input['RunArgsArgs']):
        """Inline task configuration: image, platform and run command.

        All three fields are required.
        """
        for field, supplied in (
            ("image_resource", image_resource),
            ("platform", platform),
            ("run", run),
        ):
            pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def image_resource(self) -> pulumi.Input['AnonymousResourceArgs']:
        """Resource describing the task image (an AnonymousResourceArgs)."""
        return pulumi.get(self, "image_resource")

    @image_resource.setter
    def image_resource(self, value: pulumi.Input['AnonymousResourceArgs']):
        pulumi.set(self, "image_resource", value)

    @property
    @pulumi.getter
    def platform(self) -> pulumi.Input[str]:
        """Platform the task runs on."""
        return pulumi.get(self, "platform")

    @platform.setter
    def platform(self, value: pulumi.Input[str]):
        pulumi.set(self, "platform", value)

    @property
    @pulumi.getter
    def run(self) -> pulumi.Input['RunArgsArgs']:
        """Command specification (a RunArgsArgs)."""
        return pulumi.get(self, "run")

    @run.setter
    def run(self, value: pulumi.Input['RunArgsArgs']):
        pulumi.set(self, "run", value)
@pulumi.input_type
class TaskStepArgs:
    def __init__(__self__, *,
                 task: pulumi.Input[str],
                 config: Optional[pulumi.Input['TaskConfigArgs']] = None,
                 image: Optional[pulumi.Input[str]] = None):
        """A task step: a required name plus optional config and image.

        :param task: name of the task (required).
        :param config: optional inline TaskConfigArgs.
        :param image: optional image name.
        """
        pulumi.set(__self__, "task", task)
        # Optional fields are recorded only when explicitly supplied.
        for field, supplied in (("config", config), ("image", image)):
            if supplied is not None:
                pulumi.set(__self__, field, supplied)

    @property
    @pulumi.getter
    def task(self) -> pulumi.Input[str]:
        """Name of the task."""
        return pulumi.get(self, "task")

    @task.setter
    def task(self, value: pulumi.Input[str]):
        pulumi.set(self, "task", value)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input['TaskConfigArgs']]:
        """Inline task configuration, if any."""
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input['TaskConfigArgs']]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input[str]]:
        """Image used to run the task, if any."""
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image", value)
| 41.872603
| 368
| 0.648968
| 3,953
| 30,567
| 4.919302
| 0.073109
| 0.125013
| 0.079194
| 0.054407
| 0.856989
| 0.786948
| 0.744163
| 0.696287
| 0.646611
| 0.605317
| 0
| 0.000471
| 0.23653
| 30,567
| 729
| 369
| 41.930041
| 0.832798
| 0.301992
| 0
| 0.494929
| 1
| 0
| 0.077673
| 0.004114
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206897
| false
| 0.016227
| 0.010142
| 0.03854
| 0.330629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0553a50b160b57ef05ec2c5a4d844fe204bc8f91
| 189
|
py
|
Python
|
projectaile/models/layer.py
|
explabs-ai/projectaile
|
992cecddb2fa6fdc60661103ac761f5bcd64f82b
|
[
"MIT"
] | 5
|
2020-10-13T10:17:17.000Z
|
2021-03-04T08:36:30.000Z
|
projectaile/models/layer.py
|
explabs-ai/projectaile
|
992cecddb2fa6fdc60661103ac761f5bcd64f82b
|
[
"MIT"
] | 2
|
2020-12-03T06:38:38.000Z
|
2021-05-08T10:06:55.000Z
|
projectaile/models/layer.py
|
explabs-ai/projectaile
|
992cecddb2fa6fdc60661103ac761f5bcd64f82b
|
[
"MIT"
] | null | null | null |
class LAYER():
    """Base layer stub: every hook is a no-op returning None.

    Subclasses are expected to override these methods; nothing in this
    base implementation computes anything.
    """

    def __init__(self):
        """No state to initialise in the base layer."""
        return None

    def call(self, x):
        """Forward-pass hook; placeholder that yields None."""
        return None

    def compute_output_shape(self, input_shape):
        """Output-shape hook; placeholder that yields None."""
        return None

    def compute_mask(self, input, input_mask=None):
        """Mask-computation hook; placeholder that yields None."""
        return None
| 15.75
| 48
| 0.724868
| 28
| 189
| 4.571429
| 0.5
| 0.210938
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169312
| 189
| 12
| 49
| 15.75
| 0.815287
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0
| 0.444444
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
0563ca8ad20e2b3dbf91e0911b60469ab33b1103
| 84
|
py
|
Python
|
registrations/pca/__init__.py
|
devisperessutti/Python
|
829098c91234aebe1463dd613af96e1e6bf9fdc1
|
[
"MIT"
] | 1
|
2020-02-16T15:17:11.000Z
|
2020-02-16T15:17:11.000Z
|
registrations/pca/__init__.py
|
devisperessutti/pca_echo_registration
|
829098c91234aebe1463dd613af96e1e6bf9fdc1
|
[
"MIT"
] | null | null | null |
registrations/pca/__init__.py
|
devisperessutti/pca_echo_registration
|
829098c91234aebe1463dd613af96e1e6bf9fdc1
|
[
"MIT"
] | 1
|
2016-08-23T08:40:13.000Z
|
2016-08-23T08:40:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 1 18:32:28 2014
@author: dp11
"""
| 10.5
| 35
| 0.547619
| 14
| 84
| 3.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215385
| 0.22619
| 84
| 7
| 36
| 12
| 0.492308
| 0.869048
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
057f7e72a55786570b251897fcfe42bb152399f4
| 31
|
py
|
Python
|
template/__init__.py
|
chucoding/Notion2Github
|
820aace4e6f52a42adf2587f5c77ef768c4e1586
|
[
"MIT"
] | null | null | null |
template/__init__.py
|
chucoding/Notion2Github
|
820aace4e6f52a42adf2587f5c77ef768c4e1586
|
[
"MIT"
] | null | null | null |
template/__init__.py
|
chucoding/Notion2Github
|
820aace4e6f52a42adf2587f5c77ef768c4e1586
|
[
"MIT"
] | null | null | null |
from template.calendar import *
| 31
| 31
| 0.83871
| 4
| 31
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5566a80c7351f7cde6cc960cf5520c5bf9e4f317
| 193
|
py
|
Python
|
gae/src/views.py
|
Tjorriemorrie/flexitime
|
b2d8564ba9e0f0c2c16610a8a99d54dd6ea58cae
|
[
"MIT"
] | null | null | null |
gae/src/views.py
|
Tjorriemorrie/flexitime
|
b2d8564ba9e0f0c2c16610a8a99d54dd6ea58cae
|
[
"MIT"
] | null | null | null |
gae/src/views.py
|
Tjorriemorrie/flexitime
|
b2d8564ba9e0f0c2c16610a8a99d54dd6ea58cae
|
[
"MIT"
] | null | null | null |
from flask import render_template, request, abort
from src import app
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
    """Serve index.html for the root URL and for any sub-path.

    The captured ``path`` segment is intentionally unused: every route
    renders the same template (the usual single-page-app catch-all, so
    client-side routing can take over).
    """
    return render_template('index.html')
| 21.444444
| 49
| 0.704663
| 27
| 193
| 4.962963
| 0.592593
| 0.208955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108808
| 193
| 8
| 50
| 24.125
| 0.77907
| 0
| 0
| 0
| 0
| 0
| 0.139896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
55750942a0246d19f2f594fbbf9c661c33e282aa
| 288
|
py
|
Python
|
nlp/Constants.py
|
Asap7772/DeepCriminalize
|
c171c6ce6e87e126e6e2b0ed1d9709ee7d0ce667
|
[
"MIT"
] | 1
|
2019-10-28T02:40:07.000Z
|
2019-10-28T02:40:07.000Z
|
nlp/Constants.py
|
Asap7772/DeepCriminalize
|
c171c6ce6e87e126e6e2b0ed1d9709ee7d0ce667
|
[
"MIT"
] | 16
|
2020-01-28T23:05:09.000Z
|
2022-02-27T03:02:38.000Z
|
nlp/Constants.py
|
Asap7772/DeepCriminalize
|
c171c6ce6e87e126e6e2b0ed1d9709ee7d0ce667
|
[
"MIT"
] | null | null | null |
# Credentials and endpoints for the Azure Text Analytics (v2.1 keyPhrases) API.
# NOTE(review): this subscription key is committed in plain text; it should be
# rotated and loaded from an environment variable or secret store instead.
# NOTE(review): `EXT_ANALYTICS_SUBSCRIPTION_KEY` looks truncated — the sibling
# constants use the `TEXT_` prefix. Confirm against callers before renaming.
EXT_ANALYTICS_SUBSCRIPTION_KEY = "f861df55922e4287b9de318f55c1da2c"
TEXT_ANALYTICS_ENDPOINT = "https://deepcriminalizenlpnlpnlp.cognitiveservices.azure.com/"
TEXT_ANALYTICS_URL = "https://deepcriminalizenlpnlpnlp.cognitiveservices.azure.com/text/analytics/v2.1/keyPhrases?showStats=True"
| 72
| 129
| 0.868056
| 28
| 288
| 8.678571
| 0.642857
| 0.160494
| 0.378601
| 0.419753
| 0.55144
| 0.55144
| 0.55144
| 0
| 0
| 0
| 0
| 0.078853
| 0.03125
| 288
| 3
| 130
| 96
| 0.792115
| 0
| 0
| 0
| 0
| 0.333333
| 0.690972
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
55858dc8c91d0a5465407b0a3a972318dd419201
| 3,635
|
py
|
Python
|
pipeline/archivebot/pattern_conversion_test.py
|
chfoo/ArchiveBot
|
84e0c1cf9c0a9559fa1370f3570a0837f9a7641f
|
[
"MIT"
] | null | null | null |
pipeline/archivebot/pattern_conversion_test.py
|
chfoo/ArchiveBot
|
84e0c1cf9c0a9559fa1370f3570a0837f9a7641f
|
[
"MIT"
] | null | null | null |
pipeline/archivebot/pattern_conversion_test.py
|
chfoo/ArchiveBot
|
84e0c1cf9c0a9559fa1370f3570a0837f9a7641f
|
[
"MIT"
] | null | null | null |
import re
import unittest
from .pattern_conversion import lua_pattern_to_regex
class Test(unittest.TestCase):
    """Checks that lua_pattern_to_regex translates Lua pattern constructs
    (%-character classes, escaped punctuation, '-' repetition, bracket sets)
    into Python regexes with matching accept/reject behaviour."""

    def test_alpha(self):
        # %a: letters of either case; digits rejected.
        self.assertTrue(re.search(lua_pattern_to_regex('%a'), 'a'))
        self.assertTrue(re.search(lua_pattern_to_regex('%a'), 'Z'))
        self.assertFalse(re.search(lua_pattern_to_regex('%a'), '0'))

    def test_control(self):
        # %c: control characters only.
        self.assertTrue(re.search(lua_pattern_to_regex('%c'), '\x01'))
        self.assertFalse(re.search(lua_pattern_to_regex('%c'), 'a'))

    def test_graphic(self):
        # %g: visible printable characters; whitespace rejected.
        self.assertTrue(re.search(lua_pattern_to_regex('%g'), 'P'))
        self.assertFalse(re.search(lua_pattern_to_regex('%g'), '\t'))

    def test_lowercase(self):
        # %l: lowercase letters only.
        self.assertTrue(re.search(lua_pattern_to_regex('%l'), 'h'))
        self.assertFalse(re.search(lua_pattern_to_regex('%l'), 'H'))

    def test_printable(self):
        # %p: punctuation such as ']', '-', '#'; control chars rejected.
        self.assertTrue(re.search(lua_pattern_to_regex('%p'), ']'))
        self.assertTrue(re.search(lua_pattern_to_regex('%p'), '-'))
        self.assertTrue(re.search(lua_pattern_to_regex('%p'), '#'))
        self.assertFalse(re.search(lua_pattern_to_regex('%p'), '\x01'))

    def test_space(self):
        # %s: whitespace (space, tab); letters rejected.
        self.assertTrue(re.search(lua_pattern_to_regex('%s'), ' '))
        self.assertTrue(re.search(lua_pattern_to_regex('%s'), '\t'))
        self.assertFalse(re.search(lua_pattern_to_regex('%s'), 'A'))

    def test_upper(self):
        # %u: uppercase letters only.
        self.assertTrue(re.search(lua_pattern_to_regex('%u'), 'A'))
        self.assertFalse(re.search(lua_pattern_to_regex('%u'), 'a'))

    def test_alphanum(self):
        # %w: alphanumerics; punctuation rejected.
        self.assertTrue(re.search(lua_pattern_to_regex('%w'), 'A'))
        self.assertFalse(re.search(lua_pattern_to_regex('%w'), '#'))

    def test_hex(self):
        # %x: hexadecimal digits; 'z' rejected.
        self.assertTrue(re.search(lua_pattern_to_regex('%x'), 'A'))
        self.assertFalse(re.search(lua_pattern_to_regex('%x'), 'z'))

    def test_complex(self):
        # Full URL patterns: %-escaped '.' and '?' must match only literally.
        self.assertTrue(re.search(
            lua_pattern_to_regex(r'^http://www%.reddit%.com/login%?dest='),
            'http://www.reddit.com/login?dest=archiveteam'
        ))
        self.assertFalse(re.search(
            lua_pattern_to_regex(r'^http://www%.reddit%.com/login%?dest='),
            'http://www.reddit.com/r/archiveteam'
        ))
        self.assertTrue(re.search(
            lua_pattern_to_regex(r'subscription%.php%?'),
            'subscription.php?'
        ))
        self.assertFalse(re.search(
            lua_pattern_to_regex(r'subscription%.php%?'),
            'subscriptionXphp?'
        ))
        self.assertTrue(re.search(
            lua_pattern_to_regex(r'^http://.+%.blogspot%.com/search%?'),
            'http://archiveteam.blogspot.com/search?q=archives'
        ))
        self.assertFalse(re.search(
            lua_pattern_to_regex(r'^http://.+%.blogspot%.com/search%?'),
            'http://.blogspot.com/search?q=archives'
        ))

    def test_dash(self):
        # %- is a literal dash; a bare '-' is Lua's lazy zero-or-more
        # quantifier on the previous item (here, on 't').
        self.assertTrue(re.search(
            lua_pattern_to_regex('cat%-dog'),
            'cat-dog'
        ))
        self.assertFalse(re.search(
            lua_pattern_to_regex('cat%-dog'),
            'cattdog'
        ))
        self.assertTrue(re.search(
            lua_pattern_to_regex('cat-dog'),
            'cattdog'
        ))
        self.assertTrue(re.search(
            lua_pattern_to_regex('cat-dog'),
            'cattttdog'
        ))

    def test_nest(self):
        # %-classes must keep their meaning inside a [...] bracket set.
        self.assertTrue(re.search(
            lua_pattern_to_regex('abc[%a]xyz'),
            'abcDxyz'
        ))
        self.assertFalse(re.search(
            lua_pattern_to_regex('abc[%a]xyz'),
            'abc xyz'
        ))
| 35.990099
| 75
| 0.597249
| 456
| 3,635
| 4.502193
| 0.140351
| 0.170482
| 0.204579
| 0.28982
| 0.827082
| 0.801754
| 0.79737
| 0.792012
| 0.696055
| 0.291281
| 0
| 0.001793
| 0.232737
| 3,635
| 100
| 76
| 36.35
| 0.734313
| 0
| 0
| 0.44186
| 0
| 0
| 0.150757
| 0
| 0
| 0
| 0
| 0
| 0.395349
| 1
| 0.139535
| false
| 0
| 0.034884
| 0
| 0.186047
| 0.011628
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
55a866d149fd836de36d8bc24985363e886299e7
| 210
|
py
|
Python
|
mogpe/gating_networks/__init__.py
|
aidanscannell/mogpe
|
25a9af473d73d6fa35bd060bee0eb2c372b995e5
|
[
"Apache-2.0"
] | 11
|
2021-04-01T02:40:21.000Z
|
2022-01-31T16:14:44.000Z
|
mogpe/gating_networks/__init__.py
|
aidanscannell/mogpe
|
25a9af473d73d6fa35bd060bee0eb2c372b995e5
|
[
"Apache-2.0"
] | null | null | null |
mogpe/gating_networks/__init__.py
|
aidanscannell/mogpe
|
25a9af473d73d6fa35bd060bee0eb2c372b995e5
|
[
"Apache-2.0"
] | 3
|
2021-04-04T02:45:34.000Z
|
2021-11-22T23:48:28.000Z
|
#!/usr/bin/env python3
from mogpe.gating_networks.base import GatingNetworkBase
from mogpe.gating_networks.svgp import SVGPGatingNetworkBinary, SVGPGatingNetworkMulti, SVGPGatingFunction, SVGPGatingNetworkBase
| 52.5
| 129
| 0.880952
| 21
| 210
| 8.714286
| 0.761905
| 0.098361
| 0.163934
| 0.251366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005076
| 0.061905
| 210
| 3
| 130
| 70
| 0.923858
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
75a4208309f1604d84b2dec31148c64b1fc5efa1
| 1,986
|
py
|
Python
|
src/morphometrics/measure/_tests/test_surface.py
|
haesleinhuepf/morphometrics
|
c4bdee33a9deaeed6f69a6853bf0787601fa8494
|
[
"BSD-3-Clause"
] | 5
|
2022-03-17T18:14:18.000Z
|
2022-03-23T00:48:17.000Z
|
src/morphometrics/measure/_tests/test_surface.py
|
haesleinhuepf/morphometrics
|
c4bdee33a9deaeed6f69a6853bf0787601fa8494
|
[
"BSD-3-Clause"
] | 11
|
2022-01-27T14:10:43.000Z
|
2022-03-20T18:22:30.000Z
|
src/morphometrics/measure/_tests/test_surface.py
|
haesleinhuepf/morphometrics
|
c4bdee33a9deaeed6f69a6853bf0787601fa8494
|
[
"BSD-3-Clause"
] | 1
|
2022-03-17T18:17:21.000Z
|
2022-03-17T18:17:21.000Z
|
import numpy as np
import pytest
import trimesh
from morphometrics.measure.surface import distance_between_surfaces
@pytest.mark.parametrize("fill_value", [np.nan, 0])
def test_distance_between_surfaces(fill_value):
    """Only the last source vertex should find the destination surface
    (distance 5, the x-gap between the two triangles); the other two
    vertices receive ``fill_value``.

    Parametrized over nan and 0 to cover both typical fill values.
    """
    # Source triangle in the x=0 plane with all vertex normals along +x.
    source_vertices = np.array([[0, 0, 0], [0, 10, 0], [0, 10, 10]])
    source_faces = np.array([[0, 1, 2]])
    vertex_normals = np.array([[1, 0, 0], [1, 0, 0], [1, 0, 0]])
    source_mesh = trimesh.Trimesh(
        vertices=source_vertices, faces=source_faces, vertex_normals=vertex_normals
    )
    # Destination triangle in the x=5 plane, offset in y so that (per the
    # final assertion) only one source vertex measures a distance.
    destination_vertices = np.array([[5, 5, 10], [5, 15, 5], [5, 15, 15]])
    destination_faces = np.array([[0, 1, 2]])
    destination_mesh = trimesh.Trimesh(
        vertices=destination_vertices,
        faces=destination_faces,
        vertex_normals=vertex_normals,
    )
    distances = distance_between_surfaces(
        source_surface=source_mesh,
        destination_surface=destination_mesh,
        fill_value=fill_value,
    )
    # Two misses filled with fill_value, one hit at distance 5.
    np.testing.assert_allclose(distances, [fill_value, fill_value, 5])
@pytest.mark.parametrize("fill_value", [np.nan, 0])
def test_distance_between_surfaces_flip_normal(fill_value):
    """Same fixture as test_distance_between_surfaces but mirrored: the
    source sits at x=5 and the destination at x=0, so the +x normals point
    *away* from the destination and ``flip_normals=True`` is required to
    reproduce the same [fill_value, fill_value, 5] result."""
    # Source triangle in the x=5 plane with all vertex normals along +x.
    source_vertices = np.array([[5, 0, 0], [5, 10, 0], [5, 10, 10]])
    source_faces = np.array([[0, 1, 2]])
    vertex_normals = np.array([[1, 0, 0], [1, 0, 0], [1, 0, 0]])
    source_mesh = trimesh.Trimesh(
        vertices=source_vertices, faces=source_faces, vertex_normals=vertex_normals
    )
    # Destination triangle in the x=0 plane, on the opposite side of the
    # source from where its normals point.
    destination_vertices = np.array([[0, 5, 10], [0, 15, 5], [0, 15, 15]])
    destination_faces = np.array([[0, 1, 2]])
    destination_mesh = trimesh.Trimesh(
        vertices=destination_vertices,
        faces=destination_faces,
        vertex_normals=vertex_normals,
    )
    distances = distance_between_surfaces(
        source_surface=source_mesh,
        destination_surface=destination_mesh,
        fill_value=fill_value,
        flip_normals=True,
    )
    # Two misses filled with fill_value, one hit at distance 5.
    np.testing.assert_allclose(distances, [fill_value, fill_value, 5])
| 34.842105
| 83
| 0.671702
| 265
| 1,986
| 4.781132
| 0.154717
| 0.085241
| 0.037885
| 0.041042
| 0.874507
| 0.874507
| 0.827151
| 0.827151
| 0.827151
| 0.827151
| 0
| 0.052434
| 0.193353
| 1,986
| 56
| 84
| 35.464286
| 0.738452
| 0
| 0
| 0.638298
| 0
| 0
| 0.01007
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 1
| 0.042553
| false
| 0
| 0.085106
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.