hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d1440784b015c2af1bb3c792b09e92f2956dc6a
| 44
|
py
|
Python
|
app/handlers/homework/inline_mode/__init__.py
|
vitaliy-ukiru/math-bot
|
72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9
|
[
"MIT"
] | 1
|
2021-12-11T07:41:38.000Z
|
2021-12-11T07:41:38.000Z
|
app/handlers/homework/inline_mode/__init__.py
|
vitaliy-ukiru/math-bot
|
72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9
|
[
"MIT"
] | 8
|
2021-05-08T21:48:34.000Z
|
2022-01-20T15:42:00.000Z
|
app/handlers/homework/inline_mode/__init__.py
|
vitaliy-ukiru/math-bot
|
72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9
|
[
"MIT"
] | null | null | null |
__all__ = ("dp",)
from .handlers import dp
| 11
| 24
| 0.659091
| 6
| 44
| 4.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 3
| 25
| 14.666667
| 0.694444
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1d2c00608b961b664ac9d3e64dbf1d33fd2aceb9
| 892
|
py
|
Python
|
src/tt_storage/tt_storage/exceptions.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 85
|
2017-11-21T12:22:02.000Z
|
2022-03-27T23:07:17.000Z
|
src/tt_storage/tt_storage/exceptions.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 545
|
2017-11-04T14:15:04.000Z
|
2022-03-27T14:19:27.000Z
|
src/tt_storage/tt_storage/exceptions.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | 45
|
2017-11-11T12:36:30.000Z
|
2022-02-25T06:10:44.000Z
|
from tt_web import exceptions
class StorageError(exceptions.BaseError):
pass
class OperationsError(StorageError):
pass
class ItemAlreadyCreated(OperationsError):
MESSAGE = 'can not create item {item_id} for owner {owner_id}'
class CanNotDeleteItem(OperationsError):
MESSAGE = 'Can not delete item {item_id} from owner {owner_id}'
class CanNotChangeItemOwner(OperationsError):
MESSAGE = 'Can not change item {item_id} owner from {old_owner_id} to {new_owner_id}'
class CanNotChangeItemOwnerSameOwner(OperationsError):
MESSAGE = 'Can not change item {item_id} ownwer {owner_id} to same'
class CanNotChangeItemStorage(OperationsError):
MESSAGE = 'Can not move item {item_id} from storage {old_storage_id} to storage {new_storage_id}'
class UnknownOperationTypeInProtobuf(OperationsError):
MESSAGE = 'Unknown operation type in protobuf: "{type}"'
| 25.485714
| 101
| 0.767937
| 106
| 892
| 6.301887
| 0.358491
| 0.197605
| 0.187126
| 0.209581
| 0.131737
| 0.131737
| 0.131737
| 0.131737
| 0
| 0
| 0
| 0
| 0.151345
| 892
| 34
| 102
| 26.235294
| 0.882431
| 0
| 0
| 0.117647
| 0
| 0
| 0.401796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.117647
| 0.058824
| 0
| 0.882353
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
1d48fdb0886237d37ece2a8f2b0d527a48b1bda4
| 269
|
py
|
Python
|
spectrespecs/nightwatch/utils.py
|
Spacehug/loony-lovegood
|
fd860591d37bd18107243e4e4e86cb4fd3836c6f
|
[
"MIT"
] | 1
|
2019-08-03T09:22:41.000Z
|
2019-08-03T09:22:41.000Z
|
spectrespecs/nightwatch/utils.py
|
Spacehug/loony-lovegood
|
fd860591d37bd18107243e4e4e86cb4fd3836c6f
|
[
"MIT"
] | 2
|
2021-04-30T20:58:49.000Z
|
2021-06-01T23:58:36.000Z
|
spectrespecs/nightwatch/utils.py
|
Spacehug/loony-lovegood
|
fd860591d37bd18107243e4e4e86cb4fd3836c6f
|
[
"MIT"
] | null | null | null |
import re
SPAM = re.compile(r"([-\w\d:%._+~#=]*\.[\w\d]{2,6})|(@[\w\d_]*)")
CODE = re.compile(r"([0-9\s]{12})")
def is_malicious(message):
return re.search(SPAM, message) is not None
def is_friend_code(message):
return re.search(CODE, message) is not None
| 20.692308
| 65
| 0.613383
| 47
| 269
| 3.404255
| 0.489362
| 0.0375
| 0.125
| 0.2625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.137546
| 269
| 12
| 66
| 22.416667
| 0.663793
| 0
| 0
| 0
| 0
| 0
| 0.208178
| 0.159851
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
1d5fe86bcb98105828cf2270bc8568f523213dba
| 183
|
py
|
Python
|
lambdata_JonRivera/__init__.py
|
JonRivera/Package_Repo
|
c12dc07ce5ab04e6842403b6adc89e7f8e4024aa
|
[
"MIT"
] | null | null | null |
lambdata_JonRivera/__init__.py
|
JonRivera/Package_Repo
|
c12dc07ce5ab04e6842403b6adc89e7f8e4024aa
|
[
"MIT"
] | null | null | null |
lambdata_JonRivera/__init__.py
|
JonRivera/Package_Repo
|
c12dc07ce5ab04e6842403b6adc89e7f8e4024aa
|
[
"MIT"
] | 2
|
2020-08-04T19:11:59.000Z
|
2020-08-07T01:29:21.000Z
|
""" lambdata-JonRivera - a collection of Data Science Helper Functions """
import pandas as pd
import numpy as np
ONES = pd.DataFrame(np.ones(10))
ZEROS = pd.DataFrame(np.zeros(50))
| 26.142857
| 74
| 0.737705
| 29
| 183
| 4.655172
| 0.689655
| 0.088889
| 0.192593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025478
| 0.142077
| 183
| 6
| 75
| 30.5
| 0.834395
| 0.360656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1d610fc0f15125a457a1844b617c8a8eaf67d90b
| 234
|
py
|
Python
|
src/oscar/apps/address/apps.py
|
Jean1508/ya-madoa
|
1ffb1d11e15bf33e4c3a09698675a4357e887eaa
|
[
"BSD-3-Clause"
] | null | null | null |
src/oscar/apps/address/apps.py
|
Jean1508/ya-madoa
|
1ffb1d11e15bf33e4c3a09698675a4357e887eaa
|
[
"BSD-3-Clause"
] | 5
|
2021-05-28T19:38:28.000Z
|
2022-03-12T00:45:39.000Z
|
src/oscar/apps/address/apps.py
|
Jean1508/ya-madoa
|
1ffb1d11e15bf33e4c3a09698675a4357e887eaa
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils.translation import gettext_lazy as _
from oscar.core.application import OscarConfig
class AddressConfig(OscarConfig):
label = 'address'
name = 'oscar.apps.address'
verbose_name = _('Address')
| 23.4
| 55
| 0.722222
| 26
| 234
| 6.346154
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 234
| 9
| 56
| 26
| 0.873016
| 0
| 0
| 0
| 0
| 0
| 0.142222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1d69da8a62d50b6da7b32182bb1bb70047c27846
| 63
|
py
|
Python
|
test/hello_world/a.py
|
xuzizhou/fc-python-sdk
|
3964dc91de69263083ef62ab4b13b21c6fe4fc58
|
[
"MIT"
] | 51
|
2017-08-02T01:35:03.000Z
|
2022-03-13T07:07:15.000Z
|
test/hello_world/a.py
|
xuzizhou/fc-python-sdk
|
3964dc91de69263083ef62ab4b13b21c6fe4fc58
|
[
"MIT"
] | 61
|
2017-08-26T03:37:26.000Z
|
2022-01-23T21:20:56.000Z
|
test/hello_world/a.py
|
xuzizhou/fc-python-sdk
|
3964dc91de69263083ef62ab4b13b21c6fe4fc58
|
[
"MIT"
] | 16
|
2017-09-27T07:58:19.000Z
|
2021-11-12T03:21:20.000Z
|
def my_handler(event, context):
return 'new hello world'
| 12.6
| 31
| 0.698413
| 9
| 63
| 4.777778
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 63
| 4
| 32
| 15.75
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0.245902
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
1d7e759f6a6332307c3c88ce555745f1b7552d51
| 163
|
py
|
Python
|
vivid/featureset/__init__.py
|
upura/vivid
|
6139697d60656d4774aceae880f5a07d929124a8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
vivid/featureset/__init__.py
|
upura/vivid
|
6139697d60656d4774aceae880f5a07d929124a8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
vivid/featureset/__init__.py
|
upura/vivid
|
6139697d60656d4774aceae880f5a07d929124a8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from .atoms import AbstractMergeAtom, AbstractAtom, StringContainsAtom
from .molecules import create_molecule, find_molecule
from .utils import create_data_loader
| 40.75
| 70
| 0.871166
| 19
| 163
| 7.263158
| 0.684211
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092025
| 163
| 3
| 71
| 54.333333
| 0.932432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1d820fd325920c61a4473dec5305b07108f20eb0
| 8,206
|
py
|
Python
|
SViTE/engine.py
|
VITA-Group/SViTE
|
b0c62fd153c8b0b99917ab935ee76925c9de1149
|
[
"MIT"
] | 50
|
2021-05-29T00:52:45.000Z
|
2022-03-17T11:39:47.000Z
|
SViTE/engine.py
|
VITA-Group/SViTE
|
b0c62fd153c8b0b99917ab935ee76925c9de1149
|
[
"MIT"
] | 2
|
2022-01-16T07:24:52.000Z
|
2022-03-29T01:56:24.000Z
|
SViTE/engine.py
|
VITA-Group/SViTE
|
b0c62fd153c8b0b99917ab935ee76925c9de1149
|
[
"MIT"
] | 6
|
2021-06-27T22:24:16.000Z
|
2022-01-17T02:45:32.000Z
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional
import time
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from losses import DistillationLoss
import utils
import pdb
import warnings
warnings.filterwarnings('ignore')
def get_tau(start_tau, end_tau, ite, total):
tau = start_tau + (end_tau - start_tau) * ite / total
return tau
ite_step = 0
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
set_training_mode=True, mask=None, args=None):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
# pdb.set_trace()
total_iteration = len(data_loader) * (args.epochs)
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
global ite_step
optimizer.zero_grad()
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if args.token_selection:
tau = get_tau(10, 0.1, ite_step, total_iteration)
else:
tau = -1
with torch.cuda.amp.autocast():
if args.pruning_type == 'structure':
outputs, atten_pruning_indicator = model(samples, tau=tau, number=args.token_number)
else:
outputs = model(samples, tau=tau, number=args.token_number)
atten_pruning_indicator = None
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(),
create_graph=is_second_order)
if mask is not None:
mask.step(pruning_type=args.pruning_type)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# update sparse topology
ite_step = mask.steps
if ite_step % args.update_frequency == 0 and ite_step < args.t_end * total_iteration:
mask.at_end_of_epoch(pruning_type=args.pruning_type,
indicator_list=atten_pruning_indicator)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def train_one_epoch_training_time(model: torch.nn.Module, criterion: DistillationLoss,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
set_training_mode=True, mask=None, args=None):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
# pdb.set_trace()
total_time = 0
total_iteration = len(data_loader) * (args.epochs)
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
optimizer.zero_grad()
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
start = time.time()
if args.pruning_type == 'structure':
outputs, atten_pruning_indicator = model(samples)
elif args.token_selection:
outputs = model(samples, tau=10, number=args.token_number)
atten_pruning_indicator = None
else:
outputs = model(samples)
atten_pruning_indicator = None
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(),
create_graph=is_second_order)
end = time.time()
total_time += end-start
global ite_step
ite_step += 1
if ite_step % 100 == 0:
print(total_time)
total_time = 0
# if mask is not None:
# mask.step(pruning_type=args.pruning_type)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# update sparse topology
# if ite_step % args.update_frequency == 0 and ite_step < args.t_end * total_iteration:
# mask.at_end_of_epoch(pruning_type=args.pruning_type,
# indicator_list=atten_pruning_indicator)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, args=None):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
if args.token_selection:
tau = 1
else:
tau = -1
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
if args.pruning_type == 'structure':
output, atten_pruning_indicator = model(images, tau=tau, number=args.token_number)
else:
output = model(images, tau=tau, number=args.token_number)
atten_pruning_indicator = None
# output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| 39.07619
| 100
| 0.631124
| 1,002
| 8,206
| 4.958084
| 0.185629
| 0.062802
| 0.038043
| 0.022947
| 0.760266
| 0.71719
| 0.71719
| 0.709138
| 0.682166
| 0.682166
| 0
| 0.009883
| 0.272484
| 8,206
| 209
| 101
| 39.263158
| 0.822278
| 0.092006
| 0
| 0.621622
| 0
| 0.006757
| 0.041481
| 0.008754
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.074324
| 0
| 0.128378
| 0.067568
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1d8beb9a5b5a3f3b8e23f4d482b6642dbfa156f1
| 1,644
|
py
|
Python
|
sklearn_wrapper/modules/data_translaters/DataTranslater.py
|
hidetomo-watanabe/analyze_for_kaggle
|
d90dbad3d07c862271332c151bcc7229d7c353df
|
[
"Apache-2.0"
] | 3
|
2018-01-04T06:53:03.000Z
|
2019-02-19T22:19:38.000Z
|
sklearn_wrapper/modules/data_translaters/DataTranslater.py
|
hidetomo-watanabe/analyze_for_kaggle
|
d90dbad3d07c862271332c151bcc7229d7c353df
|
[
"Apache-2.0"
] | null | null | null |
sklearn_wrapper/modules/data_translaters/DataTranslater.py
|
hidetomo-watanabe/analyze_for_kaggle
|
d90dbad3d07c862271332c151bcc7229d7c353df
|
[
"Apache-2.0"
] | null | null | null |
from logging import getLogger
logger = getLogger('predict').getChild('DataTranslater')
if 'ConfigReader' not in globals():
from ..ConfigReader import ConfigReader
if 'TableDataTranslater' not in globals():
from .TableDataTranslater import TableDataTranslater
if 'ImageDataTranslater' not in globals():
from .ImageDataTranslater import ImageDataTranslater
class DataTranslater(ConfigReader):
def __init__(self, kernel=False):
self.kernel = kernel
def get_translater(self):
data_type = self.configs['data']['type']
if data_type == 'table':
self.translater = TableDataTranslater(self.kernel)
elif data_type == 'image':
self.translater = ImageDataTranslater(self.kernel)
else:
logger.error('DATA MODE SHOULD BE table OR image')
raise Exception('NOT IMPLEMENTED')
# take over instance variable
self.translater.__dict__.update(self.__dict__)
return
def get_df_data(self):
return self.translater.get_df_data()
def calc_train_data(self):
return self.translater.calc_train_data()
def write_train_data(self):
return self.translater.write_train_data()
def get_train_data(self):
return self.translater.get_train_data()
def get_pre_processers(self):
return self.translater.get_pre_processers()
def get_post_processers(self):
return self.translater.get_post_processers()
def _sample_with_under(self):
return self.translater._sample_with_under()
def _sample_with_over(self):
return self.translater._sample_with_over()
| 31.018868
| 62
| 0.697689
| 188
| 1,644
| 5.824468
| 0.292553
| 0.140639
| 0.102283
| 0.175342
| 0.251142
| 0.251142
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214112
| 1,644
| 52
| 63
| 31.615385
| 0.847523
| 0.016423
| 0
| 0
| 0
| 0
| 0.085449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0
| 0.105263
| 0.210526
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d52e31c396587f99ff03e58a1b845be97eb21a6c
| 2,937
|
py
|
Python
|
src/djanban/apps/charts/views/public.py
|
diegojromerolopez/djanban
|
6451688d49cf235d03c604b19a6a8480b33eed87
|
[
"MIT"
] | 33
|
2017-06-14T18:04:25.000Z
|
2021-06-15T07:07:56.000Z
|
src/djanban/apps/charts/views/public.py
|
diegojromerolopez/djanban
|
6451688d49cf235d03c604b19a6a8480b33eed87
|
[
"MIT"
] | 1
|
2017-05-10T08:45:55.000Z
|
2017-05-10T08:45:55.000Z
|
src/djanban/apps/charts/views/public.py
|
diegojromerolopez/djanban
|
6451688d49cf235d03c604b19a6a8480b33eed87
|
[
"MIT"
] | 8
|
2017-08-27T11:14:25.000Z
|
2021-03-03T12:11:16.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from djanban.apps.base.auth import user_is_administrator
from djanban.apps.boards.models import Board
from djanban.apps.charts import cards, labels, members, boards, requirements
# General burndown chart
def burndown(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return boards.burndown(board=board)
# Requirement burndown chart
def requirement_burndown(request, board_public_access_code, requirement_code=None):
requirement = None
board = _get_user_board(request, board_public_access_code)
if requirement_code is not None:
requirement = board.requirements.get(code=requirement_code)
return requirements.burndown(board, requirement)
# Show the spent time by week by members
def spent_time_by_week(request, week_of_year, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return members.spent_time_by_week(request.user, week_of_year=week_of_year, board=board)
# Show a chart with the task forward movements by member
def task_forward_movements_by_member(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return members.task_movements_by_member(request, "forward", board)
# Show a chart with the task backward movements by member
def task_backward_movements_by_member(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return members.task_movements_by_member(request, "backward", board)
# Show average time each card lives in each list
def avg_time_by_list(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return cards.avg_time_by_list(board)
# Average card lead time
def avg_lead_time(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return cards.avg_lead_time(request, board)
# Average card cycle time
def avg_cycle_time(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return cards.avg_cycle_time(request, board)
# Average spent times
def avg_spent_times(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return labels.avg_spent_times(request, board)
# Average estimated times
def avg_estimated_times(request, board_public_access_code):
board = _get_user_board(request, board_public_access_code)
return labels.avg_estimated_times(request, board)
# Get user boards depending on if the user is a superuser
# or a visitor
def _get_user_board(request, board_public_access_code):
if user_is_administrator(request.user):
return Board.objects.get(public_access_code=board_public_access_code)
return Board.objects.get(enable_public_access=True, public_access_code=board_public_access_code)
| 37.653846
| 100
| 0.809329
| 429
| 2,937
| 5.146853
| 0.165501
| 0.141304
| 0.181159
| 0.21875
| 0.627717
| 0.513587
| 0.493659
| 0.436594
| 0.436594
| 0.396286
| 0
| 0.000389
| 0.124957
| 2,937
| 77
| 101
| 38.142857
| 0.858755
| 0.145727
| 0
| 0.243902
| 0
| 0
| 0.006012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.268293
| false
| 0
| 0.097561
| 0
| 0.658537
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
d53b0fcc700a8ea4071c41b5598165cb774021a1
| 191
|
py
|
Python
|
weibo/test/testData.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testData.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testData.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
#coding:utf8
'''
Created on 2016年4月19日
@author: wb-zhaohaibo
'''
datas = []
weibo_data = {}
if weibo_data is None:
print "sss"
else:
print "---"
print len(weibo_data)
| 14.692308
| 25
| 0.596859
| 24
| 191
| 4.625
| 0.75
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056338
| 0.256545
| 191
| 13
| 25
| 14.692308
| 0.725352
| 0.057592
| 0
| 0
| 0
| 0
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.428571
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
d53b3f387102b62bd20819bb8ed7a91b89c52b76
| 112
|
py
|
Python
|
great_international/apps.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
great_international/apps.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
great_international/apps.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
from django.apps import AppConfig
class GreatInternationalConfig(AppConfig):
name = 'great_international'
| 18.666667
| 42
| 0.803571
| 11
| 112
| 8.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133929
| 112
| 5
| 43
| 22.4
| 0.917526
| 0
| 0
| 0
| 0
| 0
| 0.169643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d54b6954c5529cde63c332fe2c41b1b36d08863a
| 81
|
py
|
Python
|
django_ltree_field/test_utils/test_app/__init__.py
|
john-parton/django-ltree-field
|
a4378f0eb0d6a4abb2ed459c49b081d7e2a35c4b
|
[
"BSD-3-Clause"
] | 1
|
2021-11-11T20:03:12.000Z
|
2021-11-11T20:03:12.000Z
|
django_ltree_field/test_utils/test_app/__init__.py
|
john-parton/django-ltree-field
|
a4378f0eb0d6a4abb2ed459c49b081d7e2a35c4b
|
[
"BSD-3-Clause"
] | null | null | null |
django_ltree_field/test_utils/test_app/__init__.py
|
john-parton/django-ltree-field
|
a4378f0eb0d6a4abb2ed459c49b081d7e2a35c4b
|
[
"BSD-3-Clause"
] | null | null | null |
default_app_config = 'django_ltree_field.test_utils.test_app.apps.TestAppConfig'
| 40.5
| 80
| 0.876543
| 12
| 81
| 5.416667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 81
| 1
| 81
| 81
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.703704
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d54ef50b568c97cf4dd042bab1e7f918318a353a
| 2,852
|
py
|
Python
|
apps/users/migrations/0011_fishx.py
|
lucasjaroszewski/incremental-game
|
bae8823f986be0fd046bd50195d43fbc548fad90
|
[
"MIT"
] | null | null | null |
apps/users/migrations/0011_fishx.py
|
lucasjaroszewski/incremental-game
|
bae8823f986be0fd046bd50195d43fbc548fad90
|
[
"MIT"
] | 5
|
2021-06-09T17:54:51.000Z
|
2022-03-12T00:46:49.000Z
|
apps/users/migrations/0011_fishx.py
|
lucasjaroszewski/incremental-game
|
bae8823f986be0fd046bd50195d43fbc548fad90
|
[
"MIT"
] | 1
|
2020-09-27T18:26:15.000Z
|
2020-09-27T18:26:15.000Z
|
# Generated by Django 3.0.6 on 2020-08-03 20:39
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0010_profile_rod'),
]
operations = [
migrations.CreateModel(
name='FishX',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveIntegerField(default='0')),
('description', models.CharField(blank=True, default='M', max_length=1)),
('name', models.CharField(default='', max_length=30)),
('hook', models.CharField(default='S', max_length=1)),
('actual_catch', models.PositiveIntegerField(default='0')),
('stones', models.PositiveIntegerField(default='20')),
('rod', models.PositiveIntegerField(default='0')),
('bait', models.CharField(default='', max_length=30)),
('reel', models.CharField(default='', max_length=20)),
('xp', models.PositiveIntegerField(default='0')),
('icon', models.ImageField(default='default.jpg', upload_to='media/fish_pics')),
('lake_tiilen', models.BooleanField(default=False)),
('dragon_palace', models.BooleanField(default=False)),
('acteul', models.BooleanField(default=False)),
('vasu_mointains', models.BooleanField(default=False)),
('charol_plains', models.BooleanField(default=False)),
('man_eating_swamp', models.BooleanField(default=False)),
('baruoki', models.BooleanField(default=False)),
('nauru_uplands', models.BooleanField(default=False)),
('karek_swampland', models.BooleanField(default=False)),
('rinde_port', models.BooleanField(default=False)),
('serena_coast', models.BooleanField(default=False)),
('rucyana_sands', models.BooleanField(default=False)),
('elzion_airport', models.BooleanField(default=False)),
('nilva', models.BooleanField(default=False)),
('last_island', models.BooleanField(default=False)),
('dimension_rift', models.BooleanField(default=False)),
('zol_plains', models.BooleanField(default=False)),
('moonlight_forest', models.BooleanField(default=False)),
('snake_neck_igoma', models.BooleanField(default=False)),
('ancient_battlefield', models.BooleanField(default=False)),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
]
| 52.814815
| 114
| 0.596424
| 262
| 2,852
| 6.351145
| 0.431298
| 0.216346
| 0.300481
| 0.360577
| 0.101563
| 0.039663
| 0
| 0
| 0
| 0
| 0
| 0.015537
| 0.255259
| 2,852
| 53
| 115
| 53.811321
| 0.767891
| 0.015778
| 0
| 0
| 1
| 0
| 0.134046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.042553
| 0
| 0.106383
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d5610e83a2492c6e7447f48e1d9220d30e4fb359
| 160
|
py
|
Python
|
Data Analytics/making data frames.py
|
bahuisman/NatGasModel
|
397423237b90a7638089f79492be0519e02fcc67
|
[
"MIT"
] | 4
|
2019-09-09T08:05:46.000Z
|
2021-03-24T13:09:10.000Z
|
Data Analytics/making data frames.py
|
bahuisman/NatGasModel
|
397423237b90a7638089f79492be0519e02fcc67
|
[
"MIT"
] | null | null | null |
Data Analytics/making data frames.py
|
bahuisman/NatGasModel
|
397423237b90a7638089f79492be0519e02fcc67
|
[
"MIT"
] | 3
|
2019-09-09T08:05:48.000Z
|
2020-04-03T21:31:21.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 01:43:16 2017
@author: Berend
"""
import pandas as pd
import numpy as np
np.linspace(2012,2020,7)
| 14.545455
| 36
| 0.61875
| 27
| 160
| 3.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178862
| 0.23125
| 160
| 11
| 37
| 14.545455
| 0.626016
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d576129df67b75d5a13213ef3de9ba608204071b
| 108
|
py
|
Python
|
mir/__init__.py
|
fenrir-z/ymir-cmd
|
6fbffd3c1ff5dd1c9a44b55de411523b50567661
|
[
"Apache-2.0"
] | 64
|
2021-11-15T03:48:00.000Z
|
2022-03-25T07:08:46.000Z
|
mir/__init__.py
|
fenrir-z/ymir-cmd
|
6fbffd3c1ff5dd1c9a44b55de411523b50567661
|
[
"Apache-2.0"
] | 35
|
2021-11-23T04:14:35.000Z
|
2022-03-26T09:03:43.000Z
|
mir/__init__.py
|
fenrir-z/ymir-cmd
|
6fbffd3c1ff5dd1c9a44b55de411523b50567661
|
[
"Apache-2.0"
] | 57
|
2021-11-11T10:15:40.000Z
|
2022-03-29T07:27:54.000Z
|
import logging
import sys
logging.basicConfig(stream=sys.stdout, format='%(message)s', level=logging.INFO)
| 21.6
| 80
| 0.787037
| 15
| 108
| 5.666667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 108
| 4
| 81
| 27
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0.101852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d594e650b232d55ab1ddc0481cc31694028496df
| 11,748
|
py
|
Python
|
tests/unit_tests/test_view_establishment.py
|
sspbft/BFTList
|
d73aee5bd0ab05995509f0fcfaf3c0a5944e617a
|
[
"MIT"
] | 6
|
2019-11-12T01:45:55.000Z
|
2022-03-18T10:57:21.000Z
|
tests/unit_tests/test_view_establishment.py
|
practicalbft/BFTList
|
d73aee5bd0ab05995509f0fcfaf3c0a5944e617a
|
[
"MIT"
] | 4
|
2019-02-14T10:57:09.000Z
|
2019-03-21T15:22:08.000Z
|
tests/unit_tests/test_view_establishment.py
|
sspbft/BFTList
|
d73aee5bd0ab05995509f0fcfaf3c0a5944e617a
|
[
"MIT"
] | 1
|
2019-04-04T15:09:33.000Z
|
2019-04-04T15:09:33.000Z
|
import unittest
from unittest.mock import Mock, MagicMock, call
from resolve.resolver import Resolver
from modules.view_establishment.predicates import PredicatesAndAction
from modules.view_establishment.module import ViewEstablishmentModule
from modules.enums import ViewEstablishmentEnums
from resolve.enums import Function, Module
from modules.constants import VIEWS, PHASE, WITNESSES, VCHANGE
class ViewEstablishmentModuleTest(unittest.TestCase):
def setUp(self):
self.resolver = Resolver(testing=True)
def test_while_true_case_1_is_true_and_return_is_an_action(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
# (1)Predicates and action reset all should be called
view_est_mod.pred_and_action.need_reset = MagicMock(return_value = True)
view_est_mod.pred_and_action.reset_all = Mock()
# (2) Processor i recent values are noticed and both processors have been witnessed
view_est_mod.noticed_recent_value = MagicMock(return_value = True)
view_est_mod.get_witnesses = MagicMock(return_value = {0,1})
# (3)Let predicate of case 0 be false and case 1 true
view_est_mod.witnes_seen = MagicMock(return_value = True)
view_est_mod.pred_and_action.automation = MagicMock(side_effect=(lambda t ,y, x: x))
# (4) Mocks the final calls
view_est_mod.send_msg = Mock()
# Run the method and check all statements above
view_est_mod.run(testing=True)
# (1) Predicates and action reset all should be called
view_est_mod.pred_and_action.reset_all.assert_called_once()
# (2) Processor i recent values are noticed and both processors have been witnessed
self.assertTrue(view_est_mod.witnesses[view_est_mod.id])
self.assertEqual(view_est_mod.witnesses_set, {0,1})
# (3) Let predicate of case 0 be false and case 1 true, make sure function is called
calls_automaton = [call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 0),
call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 1),
call(
ViewEstablishmentEnums.ACTION,view_est_mod.phs[view_est_mod.id], 1)
]
# any_order means that no other calls to the function should be made
view_est_mod.pred_and_action.automation.assert_has_calls(calls_automaton, any_order = False)
# (4) Check that the functions are called with correct input
#view_est_mod.send_msg.assert_called_once()
# Used for mocking predicate_and_action automaton for different values
# When called with predicate : case 0 returns false, case 1 returns true.
def side_effect_case_1_return_no_action(self, action, phase, case):
if(action == ViewEstablishmentEnums.ACTION):
return ViewEstablishmentEnums.NO_ACTION
else:
return case
def test_while_true_case_1_is_true_and_return_is_no_action(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
# (1)Predicates and action reset all should be called
view_est_mod.pred_and_action.need_reset = MagicMock(return_value = True)
view_est_mod.pred_and_action.reset_all = Mock()
# (2) Processor i recent values are noticed and both processors have been witnessed
view_est_mod.noticed_recent_value = MagicMock(return_value = False)
view_est_mod.get_witnesses = MagicMock(return_value = set())
# (3)Let predicate of case 0 be false and case 1 true
view_est_mod.witnes_seen = MagicMock(return_value = True)
view_est_mod.pred_and_action.automation = MagicMock(side_effect=self.side_effect_case_1_return_no_action)
# (4) Mocks the final calls
view_est_mod.next_phs = Mock()
view_est_mod.send_msg = Mock()
# Run the method and check all statements above
view_est_mod.run(testing=True)
# (3) Let predicate of case 0 be false and case 1 true, make sure function is called
calls_automaton = [call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 0),
call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 1),
call(
ViewEstablishmentEnums.ACTION,view_est_mod.phs[view_est_mod.id], 1)
]
# any_order means that no other calls to the function should be made
view_est_mod.pred_and_action.automation.assert_has_calls(calls_automaton, any_order = False)
# (4) Check that the functions are called with correct input
view_est_mod.next_phs.assert_not_called()
#view_est_mod.send_msg.assert_called_once()
def test_while_true_no_case_is_true(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
# (1)Predicates and action reset all should not be called
view_est_mod.pred_and_action.need_reset = MagicMock(return_value = False)
view_est_mod.pred_and_action.reset_all = Mock()
# (2) Processor i recent values are noticed and both processors have been witnessed
view_est_mod.noticed_recent_value = MagicMock(return_value = True)
view_est_mod.get_witnesses = MagicMock(return_value = {0,1})
# (3) No predicate is true
view_est_mod.witnes_seen = MagicMock(return_value = True)
view_est_mod.pred_and_action.automation = MagicMock(return_value = False)
# (4) Mocks the final calls
view_est_mod.next_phs = Mock()
view_est_mod.send_msg = Mock()
# Run the method and check all statements above
view_est_mod.run(testing=True)
# (1) Predicates and action reset all should not be called
view_est_mod.pred_and_action.reset_all.assert_not_called()
# (3)
calls_automaton = [call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 0),
call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 1),
call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 2),
call(
ViewEstablishmentEnums.PREDICATE,view_est_mod.phs[view_est_mod.id], 3)
]
view_est_mod.pred_and_action.automation.assert_has_calls(calls_automaton, any_order = False)
# (4) Check that next_phs is not called and send_msg are called with correct arguments
view_est_mod.next_phs.assert_not_called()
#view_est_mod.send_msg.assert_called_once()
# Macros
def test_echo_no_witn(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
# Both conditions are fulfilled
view_est_mod.phs[view_est_mod.id] = 0
view_est_mod.pred_and_action.get_info = MagicMock(return_value = ({"current": 0, "next": 1}, False, False))
view_est_mod.echo[1] = {VIEWS: {"current": 0, "next": 1}, PHASE: 0, WITNESSES: None, VCHANGE: False}
self.assertTrue(view_est_mod.echo_no_witn(1))
# The view in the echo is not correct
view_est_mod.echo[1] = {VIEWS: {"current": 0, "next": 0}, PHASE: 0, WITNESSES: None}
self.assertFalse(view_est_mod.echo_no_witn(1))
# The phase in the echo is not correct
view_est_mod.echo[1] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
self.assertFalse(view_est_mod.echo_no_witn(1))
def test_witnes_seen(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 6, 1)
# Both condition fulfilled with f = 0
view_est_mod.witnesses[view_est_mod.id] = True
view_est_mod.witnesses_set = {1, 2, 3, 4, 5}
view_est_mod.echo[0] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
view_est_mod.echo[2] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
view_est_mod.echo[3] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
view_est_mod.echo[4] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
view_est_mod.echo[5] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
view_est_mod.echo[1] = {VIEWS: {"current": 0, "next": 1}, PHASE: 1, WITNESSES: None}
self.assertTrue(view_est_mod.witnes_seen())
# Processor i has not been witnessed
view_est_mod.witnesses[view_est_mod.id] = False
self.assertFalse(view_est_mod.witnes_seen())
# f = 1, meaning the set is not big enough
view_est_mod.witnesses[view_est_mod.id] = True
view_est_mod.witnesses_set = {1, 2, 3}
self.assertFalse(view_est_mod.witnes_seen())
def test_next_phs(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
view_est_mod.phs[view_est_mod.id] = 0
# Move to phase 1
view_est_mod.next_phs()
self.assertEqual(view_est_mod.phs[view_est_mod.id], 1)
# Move to phase 2
view_est_mod.next_phs()
self.assertEqual(view_est_mod.phs[view_est_mod.id], 0)
# Interface functions
def test_get_phs(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
view_est_mod.phs = [0, 1]
self.assertEqual(view_est_mod.get_phs(0), 0)
self.assertEqual(view_est_mod.get_phs(1), 1)
def test_init(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
view_est_mod.phs = [0, 1]
view_est_mod.witnesses_set = {0}
view_est_mod.witnesses = [True, True]
view_est_mod.init_module()
self.assertEqual(view_est_mod.phs, [0, 0])
self.assertEqual(view_est_mod.witnesses_set, set())
self.assertEqual(view_est_mod.witnesses, [False, False])
# Function added for while true loop
def test_noticed_recent_value(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 1)
# All have noticed
view_est_mod.echo_no_witn = MagicMock(return_value = True)
self.assertTrue(view_est_mod.noticed_recent_value())
# Processor 1 has not noticed (0 False, 1 True)
view_est_mod.echo_no_witn = MagicMock(side_effect=lambda x: not x)
self.assertTrue(view_est_mod.noticed_recent_value())
# None have noticed
view_est_mod.echo_no_witn = MagicMock(return_value = False)
self.assertFalse(view_est_mod.noticed_recent_value())
def test_get_witnesses(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
# Both processors have been witnessed
view_est_mod.witnesses=[True, True]
self.assertEqual(view_est_mod.get_witnesses(), {0,1})
# Both processor 1 has been witnessed, not processor 0
view_est_mod.witnesses=[False, True]
self.assertEqual(view_est_mod.get_witnesses(), {1})
# None of the processors have been witnessed
view_est_mod.witnesses=[False, False]
self.assertEqual(view_est_mod.get_witnesses(), set())
# Function added for re-routing inter-module communication
def test_get_current_view_from_predicts_and_action(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
view_est_mod.pred_and_action.get_current_view = Mock()
view_est_mod.get_current_view(0)
view_est_mod.pred_and_action.get_current_view.assert_called_once_with(0)
def test_allow_service_from_predicts_and_action(self):
view_est_mod = ViewEstablishmentModule(0, self.resolver, 2, 0)
view_est_mod.pred_and_action.allow_service = Mock()
view_est_mod.allow_service()
view_est_mod.pred_and_action.allow_service.assert_called_once()
| 44.839695
| 115
| 0.690415
| 1,678
| 11,748
| 4.530989
| 0.09118
| 0.122452
| 0.174931
| 0.034986
| 0.798501
| 0.786795
| 0.760621
| 0.722873
| 0.642115
| 0.630014
| 0
| 0.018031
| 0.221059
| 11,748
| 262
| 116
| 44.839695
| 0.812807
| 0.195948
| 0
| 0.464052
| 0
| 0
| 0.011706
| 0
| 0
| 0
| 0
| 0
| 0.196078
| 1
| 0.091503
| false
| 0
| 0.052288
| 0
| 0.163399
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d597da931bb3b2d46eadaae0e53b5f56c67a78cc
| 1,943
|
py
|
Python
|
autograd/scipy/stats/norm.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 4
|
2021-01-12T22:02:57.000Z
|
2021-04-02T15:24:18.000Z
|
autograd/scipy/stats/norm.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
autograd/scipy/stats/norm.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2017-07-30T23:49:27.000Z
|
2017-07-30T23:49:27.000Z
|
"""Gradients of the normal distribution."""
from __future__ import absolute_import
import scipy.stats
import autograd.numpy as anp
from autograd.core import primitive
from autograd.numpy.numpy_grads import unbroadcast
pdf = primitive(scipy.stats.norm.pdf)
cdf = primitive(scipy.stats.norm.cdf)
logpdf = primitive(scipy.stats.norm.logpdf)
logcdf = primitive(scipy.stats.norm.logcdf)
pdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, -g * ans * (x - loc) / scale**2))
pdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, g * ans * (x - loc) / scale**2), argnum=1)
pdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, g * ans * (((x - loc)/scale)**2 - 1.0)/scale), argnum=2)
cdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, g * pdf(x, loc, scale)))
cdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, -g * pdf(x, loc, scale)), argnum=1)
cdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, -g * pdf(x, loc, scale)*(x-loc)/scale), argnum=2)
logpdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, -g * (x - loc) / scale**2))
logpdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, g * (x - loc) / scale**2), argnum=1)
logpdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, g * (-1.0/scale + (x - loc)**2/scale**3)), argnum=2)
logcdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, g * anp.exp(logpdf(x, loc, scale) - logcdf(x, loc, scale))))
logcdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, -g * anp.exp(logpdf(x, loc, scale) - logcdf(x, loc, scale))), argnum=1)
logcdf.defvjp(lambda g, ans, vs, gvs, x, loc=0.0, scale=1.0: unbroadcast(vs, gvs, -g * anp.exp(logpdf(x, loc, scale) - logcdf(x, loc, scale))*(x-loc)/scale), argnum=2)
| 64.766667
| 167
| 0.65826
| 368
| 1,943
| 3.459239
| 0.105978
| 0.091123
| 0.113119
| 0.150825
| 0.729772
| 0.718775
| 0.707777
| 0.707777
| 0.683425
| 0.683425
| 0
| 0.039952
| 0.136902
| 1,943
| 29
| 168
| 67
| 0.719141
| 0.019043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.238095
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d5a64f6c2783e8b94f9669bc21e2aeda3ec0783a
| 1,244
|
py
|
Python
|
django_analyses/filters/__init__.py
|
TheLabbingProject/django_analyses
|
08cac40a32754a265b37524f08ec6160c69ebea8
|
[
"Apache-2.0"
] | 1
|
2020-12-30T12:43:34.000Z
|
2020-12-30T12:43:34.000Z
|
django_analyses/filters/__init__.py
|
TheLabbingProject/django_analyses
|
08cac40a32754a265b37524f08ec6160c69ebea8
|
[
"Apache-2.0"
] | 59
|
2019-12-25T13:14:56.000Z
|
2021-07-22T12:24:46.000Z
|
django_analyses/filters/__init__.py
|
TheLabbingProject/django_analyses
|
08cac40a32754a265b37524f08ec6160c69ebea8
|
[
"Apache-2.0"
] | 2
|
2020-05-24T06:44:27.000Z
|
2020-07-09T15:47:31.000Z
|
"""
Filters for the app's :ref:`models <modules/django_analyses.models:Models>`.
References
----------
* `Django REST Framework`_ `filtering documentation`_.
* django-filter_'s documentation for `Integration with DRF`_.
.. _django-filter: https://django-filter.readthedocs.io/en/stable/index.html
.. _Django REST Framework: https://www.django-rest-framework.org/
.. _filtering documentation:
https://www.django-rest-framework.org/api-guide/filtering/
.. _Integration with DRF:
https://django-filter.readthedocs.io/en/stable/guide/rest_framework.html
"""
from django_analyses.filters.analysis import AnalysisFilter
from django_analyses.filters.analysis_version import AnalysisVersionFilter
from django_analyses.filters.category import CategoryFilter
from django_analyses.filters.input import (InputDefinitionFilter, InputFilter,
InputSpecificationFilter)
from django_analyses.filters.output import (OutputDefinitionFilter,
OutputFilter,
OutputSpecificationFilter)
from django_analyses.filters.pipeline import (NodeFilter, PipeFilter,
PipelineFilter)
| 44.428571
| 78
| 0.69373
| 120
| 1,244
| 7.05
| 0.416667
| 0.115839
| 0.12766
| 0.177305
| 0.238771
| 0.160757
| 0.089835
| 0
| 0
| 0
| 0
| 0
| 0.213023
| 1,244
| 27
| 79
| 46.074074
| 0.864147
| 0.451768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
633922242bfb2084b69e826c8f5d4f9b1df633ef
| 253
|
py
|
Python
|
tips/*40.py
|
leolanese/python-playground
|
4cfa281243e48ea616387c2110444944aaba5b3d
|
[
"MIT"
] | 1
|
2018-10-11T20:27:52.000Z
|
2018-10-11T20:27:52.000Z
|
tips/*40.py
|
leolanese/python-playground
|
4cfa281243e48ea616387c2110444944aaba5b3d
|
[
"MIT"
] | null | null | null |
tips/*40.py
|
leolanese/python-playground
|
4cfa281243e48ea616387c2110444944aaba5b3d
|
[
"MIT"
] | null | null | null |
print("Hello!, Welcome to Python")
print("*" * 40)
print("Please Enter your name ")
name = input()
print("Your name is, " + name)
# Hello!,Welcome to Python
# ****************************************
# Please Enter your name
# Leo
# Your name is, Leo
| 21.083333
| 42
| 0.549407
| 32
| 253
| 4.34375
| 0.40625
| 0.230216
| 0.201439
| 0.28777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 0.162055
| 253
| 11
| 43
| 23
| 0.646226
| 0.438735
| 0
| 0
| 0
| 0
| 0.463235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.8
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
6373a447352ea40984e9c81455efa67f18110321
| 64
|
py
|
Python
|
uniswap/__init__.py
|
mul53/uniswap-python
|
f24993bcea8cb4181be59dd7e4e9abcd40a375cb
|
[
"MIT"
] | 3
|
2021-05-03T06:59:31.000Z
|
2021-11-02T05:18:54.000Z
|
uniswap/__init__.py
|
mul53/uniswap-python
|
f24993bcea8cb4181be59dd7e4e9abcd40a375cb
|
[
"MIT"
] | null | null | null |
uniswap/__init__.py
|
mul53/uniswap-python
|
f24993bcea8cb4181be59dd7e4e9abcd40a375cb
|
[
"MIT"
] | 4
|
2020-10-27T20:27:44.000Z
|
2022-03-23T22:07:55.000Z
|
from .uniswap import Uniswap, InvalidToken, InsufficientBalance
| 32
| 63
| 0.859375
| 6
| 64
| 9.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 64
| 1
| 64
| 64
| 0.948276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
6397d998f4655e986e02ed4237c84c111c3b5cc1
| 93
|
py
|
Python
|
bloodcare/apps.py
|
tanmayag8958/bloodcare
|
b0d0a3920d9e39fc37d8471a2359603d589e1798
|
[
"MIT"
] | 2
|
2019-11-19T07:38:06.000Z
|
2021-08-14T06:43:55.000Z
|
bloodcare/apps.py
|
tanmayag8958/bloodcare
|
b0d0a3920d9e39fc37d8471a2359603d589e1798
|
[
"MIT"
] | 3
|
2021-06-04T23:04:34.000Z
|
2021-06-10T19:21:04.000Z
|
bloodcare/apps.py
|
tanmayag8958/bloodcare
|
b0d0a3920d9e39fc37d8471a2359603d589e1798
|
[
"MIT"
] | 2
|
2020-04-08T16:17:09.000Z
|
2020-04-11T06:26:06.000Z
|
from django.apps import AppConfig
class BloodcareConfig(AppConfig):
name = 'bloodcare'
| 15.5
| 33
| 0.763441
| 10
| 93
| 7.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 93
| 5
| 34
| 18.6
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
63abe3e259b96a5e0967b9ac9b090b0f5826710d
| 83
|
py
|
Python
|
scripts/jenkins/docs_release_check/check.py
|
pengpj/apm-agent-java
|
d10db0ca04d31e1cd9891eb694b0c49c5b535028
|
[
"Apache-2.0"
] | 1
|
2021-08-04T05:10:14.000Z
|
2021-08-04T05:10:14.000Z
|
scripts/jenkins/docs_release_check/check.py
|
pengpj/apm-agent-java
|
d10db0ca04d31e1cd9891eb694b0c49c5b535028
|
[
"Apache-2.0"
] | 34
|
2021-01-18T07:04:28.000Z
|
2022-03-28T23:04:36.000Z
|
scripts/jenkins/docs_release_check/check.py
|
pengpj/apm-agent-java
|
d10db0ca04d31e1cd9891eb694b0c49c5b535028
|
[
"Apache-2.0"
] | 3
|
2021-06-04T13:35:28.000Z
|
2021-07-16T08:42:42.000Z
|
#!/usr/bin/env python
import lib
if __name__ == "__main__":
lib.entrypoint()
| 11.857143
| 26
| 0.662651
| 11
| 83
| 4.272727
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 83
| 6
| 27
| 13.833333
| 0.691176
| 0.240964
| 0
| 0
| 0
| 0
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
893a7c27290dcf8017cc971df2d155df432fca7e
| 154
|
py
|
Python
|
python/row-of-the-odd-triangle/solution.py
|
hiljusti/codewars-solutions
|
1a423e8cb0fbcac94738f6e51dc333f057b0a731
|
[
"WTFPL"
] | 2
|
2020-02-22T08:47:51.000Z
|
2021-05-21T22:21:55.000Z
|
python/row-of-the-odd-triangle/solution.py
|
hiljusti/codewars-solutions
|
1a423e8cb0fbcac94738f6e51dc333f057b0a731
|
[
"WTFPL"
] | null | null | null |
python/row-of-the-odd-triangle/solution.py
|
hiljusti/codewars-solutions
|
1a423e8cb0fbcac94738f6e51dc333f057b0a731
|
[
"WTFPL"
] | 1
|
2021-11-09T17:22:10.000Z
|
2021-11-09T17:22:10.000Z
|
# https://www.codewars.com/kata/5d5a7525207a674b71aa25b5
def odd_row(row):
base = row * (row - 1)
return [base + n * 2 + 1 for n in range(row)]
| 22
| 56
| 0.636364
| 24
| 154
| 4.041667
| 0.708333
| 0.123711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165289
| 0.214286
| 154
| 6
| 57
| 25.666667
| 0.636364
| 0.350649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
894f3d22c11e0a45e04bc9cf5924908b0f99befa
| 58
|
py
|
Python
|
mitmproxy/tools/console/grideditor/__init__.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 24,939
|
2015-01-01T17:13:21.000Z
|
2022-03-31T17:50:04.000Z
|
mitmproxy/tools/console/grideditor/__init__.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 3,655
|
2015-01-02T12:31:43.000Z
|
2022-03-31T20:24:57.000Z
|
mitmproxy/tools/console/grideditor/__init__.py
|
0x7c48/mitmproxy
|
f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba
|
[
"MIT"
] | 3,712
|
2015-01-06T06:47:06.000Z
|
2022-03-31T10:33:27.000Z
|
from .editors import * # noqa
from . import base # noqa
| 19.333333
| 30
| 0.672414
| 8
| 58
| 4.875
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 58
| 2
| 31
| 29
| 0.886364
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8980010713a9fe4e84501a6cc407c8bd192a9456
| 189
|
py
|
Python
|
polls/forms.py
|
kraupn3r/intranet
|
4cabf6f365ef0ea0f352f67f9322318e161ed265
|
[
"MIT"
] | null | null | null |
polls/forms.py
|
kraupn3r/intranet
|
4cabf6f365ef0ea0f352f67f9322318e161ed265
|
[
"MIT"
] | null | null | null |
polls/forms.py
|
kraupn3r/intranet
|
4cabf6f365ef0ea0f352f67f9322318e161ed265
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Poll
class PollForm(forms.ModelForm):
class Meta():
model = Poll
fields = ['title','target_departament','target_location']
| 23.625
| 65
| 0.687831
| 22
| 189
| 5.818182
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206349
| 189
| 7
| 66
| 27
| 0.853333
| 0
| 0
| 0
| 0
| 0
| 0.201058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8981dd4564ac09e0f71ab2a3985c14ff4fe90bb6
| 44,957
|
py
|
Python
|
Meters/IEC/Helpers/obis_codes.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
Meters/IEC/Helpers/obis_codes.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
Meters/IEC/Helpers/obis_codes.py
|
Runamook/PyCharmProjects
|
1b1a063345e052451f00e3fdea82e31bdd2a0cae
|
[
"MIT"
] | null | null | null |
obis_codes = {
"1.8.0": "Positive active energy (A+) total [kWh]",
"1.8.1": "Positive active energy (A+) in tariff T1 [kWh]",
"1.8.2": "Positive active energy (A+) in tariff T2 [kWh]",
"1.8.3": "Positive active energy (A+) in tariff T3 [kWh]",
"1.8.4": "Positive active energy (A+) in tariff T4 [kWh]",
"2.8.0": "Negative active energy (A+) total [kWh]",
"2.8.1": "Negative active energy (A+) in tariff T1 [kWh]",
"2.8.2": "Negative active energy (A+) in tariff T2 [kWh]",
"2.8.3": "Negative active energy (A+) in tariff T3 [kWh]",
"2.8.4": "Negative active energy (A+) in tariff T4 [kWh]",
"15.8.0": "Absolute active energy (A+) total [kWh]",
"15.8.1": "Absolute active energy (A+) in tariff T1 [kWh]",
"15.8.2": "Absolute active energy (A+) in tariff T2 [kWh]",
"15.8.3": "Absolute active energy (A+) in tariff T3 [kWh]",
"15.8.4": "Absolute active energy (A+) in tariff T4 [kWh]",
"16.8.0": "Sum active energy without reverse blockade (A+ - A-) total [kWh]",
"16.8.1": "Sum active energy without reverse blockade (A+ - A-) in tariff T1 [kWh]",
"16.8.2": "Sum active energy without reverse blockade (A+ - A-) in tariff T2 [kWh]",
"16.8.3": "Sum active energy without reverse blockade (A+ - A-) in tariff T3 [kWh]",
"16.8.4": "Sum active energy without reverse blockade (A+ - A-) in tariff T4 [kWh]",
"3.8.0": "Positive reactive energy (Q+) total [kvarh]",
"3.8.1": "Positive reactive energy (Q+) in tariff T1 [kvarh]",
"3.8.2": "Positive reactive energy (Q+) in tariff T2 [kvarh]",
"3.8.3": "Positive reactive energy (Q+) in tariff T3 [kvarh]",
"3.8.4": "Positive reactive energy (Q+) in tariff T4 [kvarh]",
"4.8.0": "Negative reactive energy (Q-) total [kvarh]",
"4.8.1": "Negative reactive energy (Q-) in tariff T1 [kvarh]",
"4.8.2": "Negative reactive energy (Q-) in tariff T2 [kvarh]",
"4.8.3": "Negative reactive energy (Q-) in tariff T3 [kvarh]",
"4.8.4": "Negative reactive energy (Q-) in tariff T4 [kvarh]",
"5.8.0": "Imported inductive reactive energy in 1-st quadrant (Q1) total [kvarh]",
"5.8.1": "Imported inductive reactive energy in 1-st quadrant (Q1) in tariff T1 [kvarh]",
"5.8.2": "Imported inductive reactive energy in 1-st quadrant (Q1) in tariff T2 [kvarh]",
"5.8.3": "Imported inductive reactive energy in 1-st quadrant (Q1) in tariff T3 [kvarh]",
"5.8.4": "Imported inductive reactive energy in 1-st quadrant (Q1) in tariff T4 [kvarh]",
"6.8.0": "Imported capacitive reactive energy in 2-nd quadrant (Q2) total [kvarh]",
"6.8.1": "Imported capacitive reactive energy in 2-nd quadr. (Q2) in tariff T1 [kvarh]",
"6.8.2": "Imported capacitive reactive energy in 2-nd quadr. (Q2) in tariff T2 [kvarh]",
"6.8.3": "Imported capacitive reactive energy in 2-nd quadr. (Q2) in tariff T3 [kvarh]",
"6.8.4": "Imported capacitive reactive energy in 2-nd quadr. (Q2) in tariff T4 [kvarh]",
"7.8.0": "Exported inductive reactive energy in 3-rd quadrant (Q3) total [kvarh]",
"7.8.1": "Exported inductive reactive energy in 3-rd quadrant (Q3) in tariff T1 [kvarh]",
"7.8.2": "Exported inductive reactive energy in 3-rd quadrant (Q3) in tariff T2 [kvarh]",
"7.8.3": "Exported inductive reactive energy in 3-rd quadrant (Q3) in tariff T3 [kvarh]",
"7.8.4": "Exported inductive reactive energy in 3-rd quadrant (Q3) in tariff T4 [kvarh]",
"8.8.0": "Exported capacitive reactive energy in 4-th quadrant (Q4) total [kvarh]",
"8.8.1": "Exported capacitive reactive energy in 4-th quadr. (Q4) in tariff T1 [kvarh]",
"8.8.2": "Exported capacitive reactive energy in 4-th quadr. (Q4) in tariff T2 [kvarh]",
"8.8.3": "Exported capacitive reactive energy in 4-th quadr. (Q4) in tariff T3 [kvarh]",
"8.8.4": "Exported capacitive reactive energy in 4-th quadr. (Q4) in tariff T4 [kvarh]",
"9.8.0": "Apparent energy (S+) total [kVAh]",
"9.8.1": "Apparent energy (S+) in tariff T1 [kVAh]",
"9.8.2": "Apparent energy (S+) in tariff T2 [kVAh]",
"9.8.3": "Apparent energy (S+) in tariff T3 [kVAh]",
"9.8.4": "Apparent energy (S+) in tariff T4 [kVAh]",
"21.8.0": "Positive active energy (A+) in phase L1 total [kWh]",
"41.8.0": "Positive active energy (A+) in phase L2 total [kWh]",
"61.8.0": "Positive active energy (A+) in phase L3 total [kWh]",
"22.8.0": "Negative active energy (A-) in phase L1 total [kWh]",
"42.8.0": "Negative active energy (A-) in phase L2 total [kWh]",
"62.8.0": "Negative active energy (A-) in phase L3 total [kWh]",
"35.8.0": "Absolute active energy (|A|) in phase L1 total [kWh]",
"55.8.0": "Absolute active energy (|A|) in phase L2 total [kWh]",
"75.8.0": "Absolute active energy (|A|) in phase L3 total [kWh]",
"1.6.0": "Positive active maximum demand (A+) total [kW]",
"1.6.1": "Positive active maximum demand (A+) in tariff T1 [kW]",
"1.6.2": "Positive active maximum demand (A+) in tariff T2 [kW]",
"1.6.3": "Positive active maximum demand (A+) in tariff T3 [kW]",
"1.6.4": "Positive active maximum demand (A+) in tariff T4 [kW]",
"2.6.0": "Negative active maximum demand (A-) total [kW]",
"2.6.1": "Negative active maximum demand (A-) in tariff T1 [kW]",
"2.6.2": "Negative active maximum demand (A-) in tariff T2 [kW]",
"2.6.3": "Negative active maximum demand (A-) in tariff T3 [kW]",
"2.6.4": "Negative active maximum demand (A-) in tariff T4 [kW]",
"15.6.0": "Absolute active maximum demand (|A|) total [kW]",
"15.6.1": "Absolute active maximum demand (|A|) in tariff T1 [kW]",
"15.6.2": "Absolute active maximum demand (|A|) in tariff T2 [kW]",
"15.6.3": "Absolute active maximum demand (|A|) in tariff T3 [kW]",
"15.6.4": "Absolute active maximum demand (|A|) in tariff T4 [kW]",
"3.6.0": "Positive reactive maximum demand (Q+) total [kvar]",
"4.6.0": "Negative reactive maximum demand (Q-) total [kvar]",
"5.6.0": "Reactive maximum demand in Q1 (Q1) total [kvar]",
"6.6.0": "Reactive maximum demand in Q2 (Q2) total [kvar]",
"7.6.0": "Reactive maximum demand in Q3 (Q3) total [kvar]",
"8.6.0": "Reactive maximum demand in Q4 (Q4) total [kvar]",
"9.6.0": "Apparent maximum demand (S+) total [kVA]",
"1.2.0": "Positive active cumulative maximum demand (A+) total [kW]",
"1.2.1": "Positive active cumulative maximum demand (A+) in tariff T1 [kW]",
"1.2.2": "Positive active cumulative maximum demand (A+) in tariff T2 [kW]",
"1.2.3": "Positive active cumulative maximum demand (A+) in tariff T3 [kW]",
"1.2.4": "Positive active cumulative maximum demand (A+) in tariff T4 [kW]",
"2.2.0": "Negative active cumulative maximum demand (A-) total [kW]",
"2.2.1": "Negative active cumulative maximum demand (A-) in tariff T1 [kW]",
"2.2.2": "Negative active cumulative maximum demand (A-) in tariff T2 [kW]",
"2.2.3": "Negative active cumulative maximum demand (A-) in tariff T3 [kW]",
"2.2.4": "Negative active cumulative maximum demand (A-) in tariff T4 [kW]",
"15.2.0": "Absolute active cumulative maximum demand (|A|) total [kW]",
"15.2.1": "Absolute active cumulative maximum demand (|A|) in tariff T1 [kW]",
"15.2.2": "Absolute active cumulative maximum demand (|A|) in tariff T2 [kW]",
"15.2.3": "Absolute active cumulative maximum demand (|A|) in tariff T3 [kW]",
"15.2.4": "Absolute active cumulative maximum demand (|A|) in tariff T4 [kW]",
"3.2.0": "Positive reactive cumulative maximum demand (Q+) total [kvar]",
"4.2.0": "Negative reactive cumulative maximum demand (Q-) total [kvar]",
"5.2.0": "Reactive cumulative maximum demand in Q1 (Q1) total [kvar]",
"6.2.0": "Reactive cumulative maximum demand in Q2 (Q2) total [kvar]",
"7.2.0": "Reactive cumulative maximum demand in Q3 (Q3) total [kvar]",
"8.2.0": "Reactive cumulative maximum demand in Q4 (Q4) total [kvar]",
"9.2.0": "Apparent cumulative maximum demand (S+) total [kVA]",
"1.4.0": "Positive active demand in a current demand period (A+) [kW]",
"2.4.0": "Negative active demand in a current demand period (A-) [kW]",
"15.4.0": "Absolute active demand in a current demand period (|A|) [kW]",
"3.4.0": "Positive reactive demand in a current demand period (Q+) [kvar]",
"4.4.0": "Negative reactive demand in a current demand period (Q-) [kvar]",
"5.4.0": "Reactive demand in a current demand period in Q1 (Q1) [kvar]",
"6.4.0": "Reactive demand in a current demand period in Q2 (Q2) [kvar]",
"7.4.0": "Reactive demand in a current demand period in Q3 (Q3) [kvar]",
"8.4.0": "Reactive demand in a current demand period in Q4 (Q4) [kvar]",
"9.4.0": "Apparent demand in a current demand period (S+) [kVA]",
"1.5.0": "Positive active demand in the last completed demand period (A+) [kW]",
"2.5.0": "Negative active demand in the last completed demand period (A-) [kW]",
"15.5.0": "Absolute active demand in the last completed demand period (|A|) [kW]",
"3.5.0": "Positive reactive demand in the last completed demand period (Q+) [kvar]",
"4.5.0": "Negative reactive demand in the last completed demand period (Q-) [kvar]",
"5.5.0": "Reactive demand in the last completed demand period in Q1 (Q1) [kvar]",
"6.5.0": "Reactive demand in the last completed demand period in Q2 (Q2) [kvar]",
"7.5.0": "Reactive demand in the last completed demand period in Q3 (Q3) [kvar]",
"8.5.0": "Reactive demand in the last completed demand period in Q4 (Q4) [kvar]",
"9.5.0": "Apparent demand in the last completed demand period (S+) [kVA]",
"1.7.0": "Positive active instantaneous power (A+) [kW]",
"21.7.0": "Positive active instantaneous power (A+) in phase L1 [kW]",
"41.7.0": "Positive active instantaneous power (A+) in phase L2 [kW]",
"61.7.0": "Positive active instantaneous power (A+) in phase L3 [kW]",
"2.7.0": "Negative active instantaneous power (A-) [kW]",
"22.7.0": "Negative active instantaneous power (A-) in phase L1 [kW]",
"42.7.0": "Negative active instantaneous power (A-) in phase L2 [kW]",
"62.7.0": "Negative active instantaneous power (A-) in phase L3 [kW]",
"15.7.0": "Absolute active instantaneous power (|A|) [kW]",
"35.7.0": "Absolute active instantaneous power (|A|) in phase L1 [kW]",
"55.7.0": "Absolute active instantaneous power (|A|) in phase L2 [kW]",
"75.7.0": "Absolute active instantaneous power (|A|) in phase L3 [kW]",
"16.7.0": "Sum active instantaneous power (A+ - A-) [kW]",
"36.7.0": "Sum active instantaneous power (A+ - A-) in phase L1 [kW]",
"56.7.0": "Sum active instantaneous power (A+ - A-) in phase L2 [kW]",
"76.7.0": "Sum active instantaneous power (A+ - A-) in phase L3 [kW]",
"3.7.0": "Positive reactive instantaneous power (Q+) [kvar]",
"23.7.0": "Positive reactive instantaneous power (Q+) in phase L1 [kvar]",
"43.7.0": "Positive reactive instantaneous power (Q+) in phase L2 [kvar]",
"63.7.0": "Positive reactive instantaneous power (Q+) in phase L3 [kvar]",
"4.7.0": "Negative reactive instantaneous power (Q-) [kvar]",
"24.7.0": "Negative reactive instantaneous power (Q-) in phase L1 [kvar]",
"44.7.0": "Negative reactive instantaneous power (Q-) in phase L2 [kvar]",
"64.7.0": "Negative reactive instantaneous power (Q-) in phase L3 [kvar]",
"9.7.0": "Apparent instantaneous power (S+) [kVA]",
"29.7.0": "Apparent instantaneous power (S+) in phase L1 [kVA]",
"49.7.0": "Apparent instantaneous power (S+) in phase L2 [kVA]",
"69.7.0": "Apparent instantaneous power (S+) in phase L3 [kVA]",
"11.7.0": "Instantaneous current (I) [A]",
"31.7.0": "Instantaneous current (I) in phase L1 [A]",
"51.7.0": "Instantaneous current (I) in phase L2 [A]",
"71.7.0": "Instantaneous current (I) in phase L3 [A]",
"91.7.0": "Instantaneous current (I) in neutral [A]",
"11.6.0": "Maximum current (I max) [A]",
"31.6.0": "Maximum current (I max) in phase L1 [A]",
"51.6.0": "Maximum current (I max) in phase L2 [A]",
"71.6.0": "Maximum current (I max) in phase L3 [A]",
"91.6.0": "Maximum current (I max) in neutral [A]",
"12.7.0": "Instantaneous voltage (U) [V]",
"32.7.0": "Instantaneous voltage (U) in phase L1 [V]",
"52.7.0": "Instantaneous voltage (U) in phase L2 [V]",
"72.7.0": "Instantaneous voltage (U) in phase L3 [V]",
"13.7.0": "Instantaneous power factor",
"33.7.0": "Instantaneous power factor in phase L1",
"53.7.0": "Instantaneous power factor in phase L2",
"73.7.0": "Instantaneous power factor in phase L3",
"14.7.0": "Frequency [Hz]",
"C.53.1": "Tamper 1 energy register",
"C.53.2": "Tamper 2 energy register",
"C.53.3": "Tamper 3 energy register",
"C.53.4": "Tamper 4 energy register",
"C.53.11": "Tamper 5 energy register",
"C.53.5": "Tamper 1 time counter register",
"C.53.6": "Tamper 2 time counter register",
"C.53.7": "Tamper 3 time counter register",
"C.53.9": "Tamper 4 time counter register",
"C.53.10": "Tamper 5 time counter register",
"C.2.0": "Event parameters change - counter",
"C.2.1": "Event parameters change - timestamp",
"C.51.1": "Event terminal cover opened - counter",
"C.51.2": "Event terminal cover opened - timestamp",
"C.51.3": "Event main cover opened - counter",
"C.51.5": "Event magnetic field detection start - counter",
"C.51.6": "Event magnetic field detection start - timestamp",
"C.51.7": "Event reverse power flow - counter",
"C.51.8": "Event reverse power flow - timestamp",
"C.7.10": "Event power down - timestamp",
"C.51.13": "Event power up - counter",
"C.51.14": "Event power up – timestamp",
"C.51.15": "Event RTC (Real Time Clock) set - counter",
"C.51.16": "Event RTC (Real Time Clock) set - timestamp",
"C.51.21": "Event terminal cover closed - counter",
"C.51.22": "Event terminal cover closed - timestamp",
"C.51.23": "Event main cover closed - counter",
"C.51.24": "Event main cover closed - timestamp",
"C.51.25": "Event log-book 1 erased - counter",
"C.51.26": "Event log-book 1 erased - timestamp",
"C.51.27": "Event fraud start - counter",
"C.51.28": "Event fraud start - timestamp",
"C.51.29": "Event fraud stop - counter",
"C.51.30": "Event fraud stop - timestamp",
"0.9.1": "Current time (hh:mm:ss)",
"0.9.2": "Date (YY.MM.DD or DD.MM.YY)",
"0.9.4": "Date and Time (YYMMDDhhmmss)",
"0.8.0": "Demand period [min]",
"0.8.4": "Load profile period [min] (option)",
"0.0.0": "Device address 1",
"0.0.1": "Device address 2",
"0.1.0": "MD reset counter",
"0.1.2": "MD reset timestamp",
"0.2.0": "Firmware version",
"0.2.2": "Tariff program ID",
"C.1.0": "Meter serial number",
"C.1.2": "Parameters file code",
"C.1.4": "Parameters check sum",
"C.1.5": "Firmware built date",
"C.1.6": "Firmware check sum",
"C.6.0": "Power down time counter",
"C.6.1": "Battery remaining capacity",
"F.F.0": "Fatal error meter status",
"C.87.0": "Active tariff",
"0.2.1": "Parameters scheme ID",
"C.60.9": "Fraud flag",
"0.3.0": "Active energy meter constant",
"0.4.2": "Current transformer ratio",
"0.4.3": "Voltage transformer ratio",
"0.0.9": "Identification number",
"21.25": "Instantaneous value of active power phase L1",
"41.25": "Instantaneous value of active power phase L2",
"61.25": "Instantaneous value of active power phase L3",
"1.25": "Instantaneous value of total power",
"23.25": "Instantaneous value of reactive power phase L1",
"43.25": "Instantaneous value of reactive power phase L2",
"63.25": "Instantaneous value of reactive power phase L3",
"3.25": "Instantaneous value of reactive power phase total",
"29.25": "Instantaneous value of apparent power phase L1",
"49.25": "Instantaneous value of apparent power phase L2",
"69.25": "Instantaneous value of apparent power phase L3",
"9.25": "Instantaneous value of total apparent power",
"31.25": "Instantaneous value of current phase L1",
"51.25": "Instantaneous value of current phase L2",
"71.25": "Instantaneous value of current phase L3",
"32.25": "Instantaneous value of voltage phase L1",
"52.25": "Instantaneous value of voltage phase L2",
"72.25": "Instantaneous value of voltage phase L3",
"33.25": "Instantaneous value of power factor phase L1",
"53.25": "Instantaneous value of power factor phase L2",
"73.25": "Instantaneous value of power factor phase L3",
"13.25": "Instantaneous value of average power factor",
"14.25": "Instantaneous value of frequency",
"C.3": "State of the in/out control signals",
"C.4": "State of the internal control signals",
"C.5": "Internal operating conditions",
"C.7.0": "Total number of phase failures",
"C.7.1": "Number of phase failures phase 1",
"C.7.2": "Number of phase failures phase 2",
"C.7.3": "Number of phase failures phase 3",
"C.51.4": "DCF-77 last synchronization",
"C.52.0": "Phase information",
"C.86.0": "Installation check"
}
obis_codes_short = {
"1.8.0": "Positive active energy total",
"1.8.1": "Positive active energy in tariff T1",
"1.8.2": "Positive active energy in tariff T2",
"1.8.3": "Positive active energy in tariff T3",
"1.8.4": "Positive active energy in tariff T4",
"2.8.0": "Negative active energy total",
"2.8.1": "Negative active energy in tariff T1",
"2.8.2": "Negative active energy in tariff T2",
"2.8.3": "Negative active energy in tariff T3",
"2.8.4": "Negative active energy in tariff T4",
"15.8.0": "Absolute active energy total",
"15.8.1": "Absolute active energy in tariff T1",
"15.8.2": "Absolute active energy in tariff T2",
"15.8.3": "Absolute active energy in tariff T3",
"15.8.4": "Absolute active energy in tariff T4",
"16.8.0": "Sum active energy without reverse blockade total",
"16.8.1": "Sum active energy without reverse blockade in tariff T1",
"16.8.2": "Sum active energy without reverse blockade in tariff T2",
"16.8.3": "Sum active energy without reverse blockade in tariff T3",
"16.8.4": "Sum active energy without reverse blockade in tariff T4",
"3.8.0": "Positive reactive energy total",
"3.8.1": "Positive reactive energy in tariff T1",
"3.8.2": "Positive reactive energy in tariff T2",
"3.8.3": "Positive reactive energy in tariff T3",
"3.8.4": "Positive reactive energy in tariff T4",
"4.8.0": "Negative reactive energy total",
"4.8.1": "Negative reactive energy in tariff T1",
"4.8.2": "Negative reactive energy in tariff T2",
"4.8.3": "Negative reactive energy in tariff T3",
"4.8.4": "Negative reactive energy in tariff T4",
"5.8.0": "Imported inductive reactive energy in 1-st quadrant total",
"5.8.1": "Imported inductive reactive energy in 1-st quadrant in tariff T1",
"5.8.2": "Imported inductive reactive energy in 1-st quadrant in tariff T2",
"5.8.3": "Imported inductive reactive energy in 1-st quadrant in tariff T3",
"5.8.4": "Imported inductive reactive energy in 1-st quadrant in tariff T4",
"6.8.0": "Imported capacitive reactive energy in 2-nd quadrant total",
"6.8.1": "Imported capacitive reactive energy in 2-nd quadr. in tariff T1",
"6.8.2": "Imported capacitive reactive energy in 2-nd quadr. in tariff T2",
"6.8.3": "Imported capacitive reactive energy in 2-nd quadr. in tariff T3",
"6.8.4": "Imported capacitive reactive energy in 2-nd quadr. in tariff T4",
"7.8.0": "Exported inductive reactive energy in 3-rd quadrant total",
"7.8.1": "Exported inductive reactive energy in 3-rd quadrant in tariff T1",
"7.8.2": "Exported inductive reactive energy in 3-rd quadrant in tariff T2",
"7.8.3": "Exported inductive reactive energy in 3-rd quadrant in tariff T3",
"7.8.4": "Exported inductive reactive energy in 3-rd quadrant in tariff T4",
"8.8.0": "Exported capacitive reactive energy in 4-th quadrant total",
"8.8.1": "Exported capacitive reactive energy in 4-th quadr. in tariff T1",
"8.8.2": "Exported capacitive reactive energy in 4-th quadr. in tariff T2",
"8.8.3": "Exported capacitive reactive energy in 4-th quadr. in tariff T3",
"8.8.4": "Exported capacitive reactive energy in 4-th quadr. in tariff T4",
"9.8.0": "Apparent energy total",
"9.8.1": "Apparent energy in tariff T1",
"9.8.2": "Apparent energy in tariff T2",
"9.8.3": "Apparent energy in tariff T3",
"9.8.4": "Apparent energy in tariff T4",
"21.8.0": "Positive active energy in phase L1 total",
"41.8.0": "Positive active energy in phase L2 total",
"61.8.0": "Positive active energy in phase L3 total",
"22.8.0": "Negative active energy in phase L1 total",
"42.8.0": "Negative active energy in phase L2 total",
"62.8.0": "Negative active energy in phase L3 total",
"35.8.0": "Absolute active energy in phase L1 total",
"55.8.0": "Absolute active energy in phase L2 total",
"75.8.0": "Absolute active energy in phase L3 total",
"1.6.0": "Positive active maximum demand total",
"1.6.1": "Positive active maximum demand in tariff T1",
"1.6.2": "Positive active maximum demand in tariff T2",
"1.6.3": "Positive active maximum demand in tariff T3",
"1.6.4": "Positive active maximum demand in tariff T4",
"2.6.0": "Negative active maximum demand total",
"2.6.1": "Negative active maximum demand in tariff T1",
"2.6.2": "Negative active maximum demand in tariff T2",
"2.6.3": "Negative active maximum demand in tariff T3",
"2.6.4": "Negative active maximum demand in tariff T4",
"15.6.0": "Absolute active maximum demand total",
"15.6.1": "Absolute active maximum demand in tariff T1",
"15.6.2": "Absolute active maximum demand in tariff T2",
"15.6.3": "Absolute active maximum demand in tariff T3",
"15.6.4": "Absolute active maximum demand in tariff T4",
"3.6.0": "Positive reactive maximum demand total",
"4.6.0": "Negative reactive maximum demand total",
"5.6.0": "Reactive maximum demand in Q1 total",
"6.6.0": "Reactive maximum demand in Q2 total",
"7.6.0": "Reactive maximum demand in Q3 total",
"8.6.0": "Reactive maximum demand in Q4 total",
"9.6.0": "Apparent maximum demand total",
"1.2.0": "Positive active cumulative maximum demand total",
"1.2.1": "Positive active cumulative maximum demand in tariff T1",
"1.2.2": "Positive active cumulative maximum demand in tariff T2",
"1.2.3": "Positive active cumulative maximum demand in tariff T3",
"1.2.4": "Positive active cumulative maximum demand in tariff T4",
"2.2.0": "Negative active cumulative maximum demand total",
"2.2.1": "Negative active cumulative maximum demand in tariff T1",
"2.2.2": "Negative active cumulative maximum demand in tariff T2",
"2.2.3": "Negative active cumulative maximum demand in tariff T3",
"2.2.4": "Negative active cumulative maximum demand in tariff T4",
"15.2.0": "Absolute active cumulative maximum demand total",
"15.2.1": "Absolute active cumulative maximum demand in tariff T1",
"15.2.2": "Absolute active cumulative maximum demand in tariff T2",
"15.2.3": "Absolute active cumulative maximum demand in tariff T3",
"15.2.4": "Absolute active cumulative maximum demand in tariff T4",
"3.2.0": "Positive reactive cumulative maximum demand total",
"4.2.0": "Negative reactive cumulative maximum demand total",
"5.2.0": "Reactive cumulative maximum demand in Q1 total",
"6.2.0": "Reactive cumulative maximum demand in Q2 total",
"7.2.0": "Reactive cumulative maximum demand in Q3 total",
"8.2.0": "Reactive cumulative maximum demand in Q4 total",
"9.2.0": "Apparent cumulative maximum demand total",
"1.4.0": "Positive active demand in current demand period",
"2.4.0": "Negative active demand in current demand period",
"15.4.0": "Absolute active demand in current demand period",
"3.4.0": "Positive reactive demand in current demand period",
"4.4.0": "Negative reactive demand in current demand period",
"5.4.0": "Reactive demand in current demand period in Q1",
"6.4.0": "Reactive demand in current demand period in Q2",
"7.4.0": "Reactive demand in current demand period in Q3",
"8.4.0": "Reactive demand in current demand period in Q4",
"9.4.0": "Apparent demand in current demand period",
"1.5.0": "Positive active demand in the last completed demand period",
"2.5.0": "Negative active demand in the last completed demand period",
"15.5.0": "Absolute active demand in the last completed demand period",
"3.5.0": "Positive reactive demand in the last completed demand period",
"4.5.0": "Negative reactive demand in the last completed demand period",
"5.5.0": "Reactive demand in the last completed demand period in Q1",
"6.5.0": "Reactive demand in the last completed demand period in Q2",
"7.5.0": "Reactive demand in the last completed demand period in Q3",
"8.5.0": "Reactive demand in the last completed demand period in Q4",
"9.5.0": "Apparent demand in the last completed demand period",
"1.7.0": "Positive active instantaneous power",
"21.7.0": "Positive active instantaneous power in phase L1",
"41.7.0": "Positive active instantaneous power in phase L2",
"61.7.0": "Positive active instantaneous power in phase L3",
"2.7.0": "Negative active instantaneous power",
"22.7.0": "Negative active instantaneous power in phase L1",
"42.7.0": "Negative active instantaneous power in phase L2",
"62.7.0": "Negative active instantaneous power in phase L3",
"15.7.0": "Absolute active instantaneous power",
"35.7.0": "Absolute active instantaneous power in phase L1",
"55.7.0": "Absolute active instantaneous power in phase L2",
"75.7.0": "Absolute active instantaneous power in phase L3",
"16.7.0": "Sum active instantaneous power",
"36.7.0": "Sum active instantaneous power in phase L1",
"56.7.0": "Sum active instantaneous power in phase L2",
"76.7.0": "Sum active instantaneous power in phase L3",
"3.7.0": "Positive reactive instantaneous power",
"23.7.0": "Positive reactive instantaneous power in phase L1",
"43.7.0": "Positive reactive instantaneous power in phase L2",
"63.7.0": "Positive reactive instantaneous power in phase L3",
"4.7.0": "Negative reactive instantaneous power",
"24.7.0": "Negative reactive instantaneous power in phase L1",
"44.7.0": "Negative reactive instantaneous power in phase L2",
"64.7.0": "Negative reactive instantaneous power in phase L3",
"9.7.0": "Apparent instantaneous power",
"29.7.0": "Apparent instantaneous power in phase L1",
"49.7.0": "Apparent instantaneous power in phase L2",
"69.7.0": "Apparent instantaneous power in phase L3",
"11.7.0": "Instantaneous current",
"31.7.0": "Instantaneous current in phase L1",
"51.7.0": "Instantaneous current in phase L2",
"71.7.0": "Instantaneous current in phase L3",
"91.7.0": "Instantaneous current in neutral",
"11.6.0": "Maximum current",
"31.6.0": "Maximum current in phase L1",
"51.6.0": "Maximum current in phase L2",
"71.6.0": "Maximum current in phase L3",
"91.6.0": "Maximum current in neutral",
"12.7.0": "Instantaneous voltage",
"32.7.0": "Instantaneous voltage in phase L1",
"52.7.0": "Instantaneous voltage in phase L2",
"72.7.0": "Instantaneous voltage in phase L3",
"13.7.0": "Instantaneous power factor",
"33.7.0": "Instantaneous power factor in phase L1",
"53.7.0": "Instantaneous power factor in phase L2",
"73.7.0": "Instantaneous power factor in phase L3",
"14.7.0": "Frequency",
"C.53.1": "Tamper 1 energy register",
"C.53.2": "Tamper 2 energy register",
"C.53.3": "Tamper 3 energy register",
"C.53.4": "Tamper 4 energy register",
"C.53.11": "Tamper 5 energy register",
"C.53.5": "Tamper 1 time counter register",
"C.53.6": "Tamper 2 time counter register",
"C.53.7": "Tamper 3 time counter register",
"C.53.9": "Tamper 4 time counter register",
"C.53.10": "Tamper 5 time counter register",
"C.2.0": "Event parameters change - counter",
"C.2.1": "Event parameters change - timestamp",
"C.51.1": "Event terminal cover opened - counter",
"C.51.2": "Event terminal cover opened - timestamp",
"C.51.3": "Event main cover opened - counter",
"C.51.5": "Event magnetic field detection start - counter",
"C.51.6": "Event magnetic field detection start - timestamp",
"C.51.7": "Event reverse power flow - counter",
"C.51.8": "Event reverse power flow - timestamp",
"C.7.10": "Event power down - timestamp",
"C.51.13": "Event power up - counter",
"C.51.14": "Event power up – timestamp",
"C.51.15": "Event RTC (Real Time Clock) set - counter",
"C.51.16": "Event RTC (Real Time Clock) set - timestamp",
"C.51.21": "Event terminal cover closed - counter",
"C.51.22": "Event terminal cover closed - timestamp",
"C.51.23": "Event main cover closed - counter",
"C.51.24": "Event main cover closed - timestamp",
"C.51.25": "Event log-book 1 erased - counter",
"C.51.26": "Event log-book 1 erased - timestamp",
"C.51.27": "Event fraud start - counter",
"C.51.28": "Event fraud start - timestamp",
"C.51.29": "Event fraud stop - counter",
"C.51.30": "Event fraud stop - timestamp",
"0.9.1": "Current time",
"0.9.2": "Date",
"0.9.4": "Date and Time",
"0.8.0": "Demand period",
"0.8.4": "Load profile period",
"0.0.0": "Device address 1",
"0.0.1": "Device address 2",
"0.1.0": "MD reset counter",
"0.1.2": "MD reset timestamp",
"0.2.0": "Firmware version",
"0.2.2": "Tariff program ID",
"C.1.0": "Meter serial number",
"C.1.2": "Parameters file code",
"C.1.4": "Parameters check sum",
"C.1.5": "Firmware built date",
"C.1.6": "Firmware check sum",
"C.6.0": "Power down time counter",
"C.6.1": "Battery remaining capacity",
"F.F.0": "Fatal error meter status",
"C.87.0": "Active tariff",
"0.2.1": "Parameters scheme ID",
"C.60.9": "Fraud flag",
"0.3.0": "Active energy meter constant",
"0.4.2": "Current transformer ratio",
"0.4.3": "Voltage transformer ratio",
"0.0.9": "Identification number",
"21.25": "Instantaneous value of active power phase L1",
"41.25": "Instantaneous value of active power phase L2",
"61.25": "Instantaneous value of active power phase L3",
"1.25": "Instantaneous value of total power",
"23.25": "Instantaneous value of reactive power phase L1",
"43.25": "Instantaneous value of reactive power phase L2",
"63.25": "Instantaneous value of reactive power phase L3",
"3.25": "Instantaneous value of reactive power phase total",
"29.25": "Instantaneous value of apparent power phase L1",
"49.25": "Instantaneous value of apparent power phase L2",
"69.25": "Instantaneous value of apparent power phase L3",
"9.25": "Instantaneous value of total apparent power",
"31.25": "Instantaneous value of current phase L1",
"51.25": "Instantaneous value of current phase L2",
"71.25": "Instantaneous value of current phase L3",
"32.25": "Instantaneous value of voltage phase L1",
"52.25": "Instantaneous value of voltage phase L2",
"72.25": "Instantaneous value of voltage phase L3",
"33.25": "Instantaneous value of power factor phase L1",
"53.25": "Instantaneous value of power factor phase L2",
"73.25": "Instantaneous value of power factor phase L3",
"13.25": "Instantaneous value of average power factor",
"14.25": "Instantaneous value of frequency",
"C.3": "State of the in/out control signals",
"C.4": "State of the internal control signals",
"C.5": "Internal operating conditions",
"C.7.0": "Total number of phase failures",
"C.7.1": "Number of phase failures phase 1",
"C.7.2": "Number of phase failures phase 2",
"C.7.3": "Number of phase failures phase 3",
"C.51.4": "DCF-77 last synchronization",
"C.52.0": "Phase information",
"C.86.0": "Installation check"
}
# OBIS code -> human-readable label used for tabular display output.
table_obis_codes = {
    "1.5.0": "Positive active demand",
    "2.5.0": "Negative active demand",
    "5.5.0": "Reactive demand in Q1",
    "6.5.0": "Reactive demand in Q2",
    "7.5.0": "Reactive demand in Q3",
    "8.5.0": "Reactive demand in Q4",
    "0.9.1": "Current time",
    "0.9.2": "Date",
    "0.0.0": "Device address 1",
    "0.0.9": "Identification number",
    "21.25": "Active power phase L1",
    "41.25": "Active power phase L2",
    "61.25": "Active power phase L3",
    "1.25": "Total power",
    "23.25": "Reactive power phase L1",
    "43.25": "Reactive power phase L2",
    "63.25": "Reactive power phase L3",
    "3.25": "Reactive power phase total",
    "32.25": "Voltage phase L1",
    "52.25": "Voltage phase L2",
    "72.25": "Voltage phase L3",
    "33.25": "Power factor phase L1",
    "53.25": "Power factor phase L2",
    "73.25": "Power factor phase L3",
    "13.25": "Average power factor",
    "14.25": "Frequency",
    "C.3": "State of the in/out control signals",  # Not implemented
    "C.4": "State of the internal control signals",  # Not implemented
    "C.5": "Internal operating conditions",  # Not implemented
    "C.7.0": "Total number of phase failures",
    "C.7.1": "Phase failures phase 1",
    "C.7.2": "Phase failures phase 2",
    "C.7.3": "Phase failures phase 3",
    "C.51.4": "DCF-77 last synchronization",  # Not implemented
    "C.52.0": "Phase information",  # Not implemented
    "C.86.0": "Installation check"  # Not implemented
}
# OBIS code -> Zabbix item key. Values like "Z-1-1...." are literal Zabbix
# item names; camelCase values are metric identifiers. Entries whose value is
# still the human-readable description are marked "# Not implemented".
zabbix_obis_codes = {
    "1.5.0": "positiveActiveDemand",
    "2.5.0": "negativeActiveDemand",
    "5.5.0": "reactiveDemandQ1",
    "6.5.0": "reactiveDemandQ2",
    "7.5.0": "reactiveDemandQ3",
    "8.5.0": "reactiveDemandQ4",
    "bill-1.5.0": "Z-1-1.1.29.0",
    "bill-2.5.0": "Z-1-1.2.29.0",
    "bill-5.5.0": "Z-1-1.5.29.0",
    "bill-6.5.0": "Z-1-1.6.29.0",
    "bill-7.5.0": "Z-1-1.7.29.0",
    "bill-8.5.0": "Z-1-1.8.29.0",
    "bill-1.8.0": "Z-1-1.1.8.0-bill",
    "bill-2.8.0": "Z-1-1.2.8.0-bill",
    "bill-5.8.0": "Z-1-1.5.8.0-bill",
    "bill-6.8.0": "Z-1-1.6.8.0-bill",
    "bill-7.8.0": "Z-1-1.7.8.0-bill",
    "bill-8.8.0": "Z-1-1.8.8.0-bill",
    "bill-raw-1.5.0": "Z-1-1.1.29.0-raw",
    "bill-raw-2.5.0": "Z-1-1.2.29.0-raw",
    "bill-raw-5.5.0": "Z-1-1.5.29.0-raw",
    "bill-raw-6.5.0": "Z-1-1.6.29.0-raw",
    "bill-raw-7.5.0": "Z-1-1.7.29.0-raw",
    "bill-raw-8.5.0": "Z-1-1.8.29.0-raw",
    "bill-raw-1.8.0": "Z-1-1.1.8.0-bill-raw",
    "bill-raw-2.8.0": "Z-1-1.2.8.0-bill-raw",
    "bill-raw-5.8.0": "Z-1-1.5.8.0-bill-raw",
    "bill-raw-6.8.0": "Z-1-1.6.8.0-bill-raw",
    "bill-raw-7.8.0": "Z-1-1.7.8.0-bill-raw",
    "bill-raw-8.8.0": "Z-1-1.8.8.0-bill-raw",
    "bill-Log": "Z-1-1.Log",
    # "bill-1.5.0": "positiveActiveDemandBill",
    # "bill-2.5.0": "negativeActiveDemandBill",
    # "bill-5.5.0": "reactiveDemandQ1Bill",
    # "bill-6.5.0": "reactiveDemandQ2Bill",
    # "bill-7.5.0": "reactiveDemandQ3Bill",
    # "bill-8.5.0": "reactiveDemandQ4Bill",
    "0.9.1": "Current time",
    "0.9.2": "Date",
    "0.0.0": "Device address 1",
    "0.0.9": "Identification number",
    "21.25": "activePowerPhaseL1",
    "41.25": "activePowerPhaseL2",
    "61.25": "activePowerPhaseL3",
    "1.25": "totalPower",
    "23.25": "reactivePowerPhaseL1",
    "43.25": "reactivePowerPhaseL2",
    "63.25": "reactivePowerPhaseL3",
    "3.25": "reactivePowerTotal",
    "32.25": "voltagePhaseL1",
    "52.25": "voltagePhaseL2",
    "72.25": "voltagePhaseL3",
    "33.25": "powerFactorPhaseL1",
    "53.25": "powerFactorPhaseL2",
    "73.25": "powerFactorPhaseL3",
    "13.25": "powerFactorAvg",
    "14.25": "Frequency",
    "C.3": "State of the in/out control signals",  # Not implemented
    "C.4": "State of the internal control signals",  # Not implemented
    "C.5": "Internal operating conditions",  # Not implemented
    "C.7.0": "phaseFailuresTotal",
    "C.7.1": "phaseFailuresPhase1",
    "C.7.2": "phaseFailuresPhase2",
    "C.7.3": "phaseFailuresPhase3",
    "C.51.4": "DCF-77 last synchronization",  # Not implemented
    "C.52.0": "Phase information",  # Not implemented
    "C.86.0": "Installation check",  # Not implemented
    "29.25": "apparentPowerPhaseL1",
    "49.25": "apparentPowerPhaseL2",
    "69.25": "apparentPowerPhaseL3",
    "9.25": "apparentPowerTotal",
    "31.25": "currentPhaseL1",
    "51.25": "currentPhaseL2",
    "71.25": "currentPhaseL3",
    "cos_phi": "CosinusPhi",
    "tan_phi": "TangensPhi",
    "1.2.1": "Z-1-1.1.2.1",
    "1.2.2": "Z-1-1.1.2.2",
    "1.6.1": "Z-1-1.1.6.1",
    "1.6.2": "Z-1-1.1.6.2",
    "1.8.0": "Z-1-1.1.8.0",
    "1.8.1": "Z-1-1.1.8.1",
    "1.8.2": "Z-1-1.1.8.2",
    "2.2.1": "Z-1-1.2.2.1",
    "2.2.2": "Z-1-1.2.2.2",
    "2.6.1": "Z-1-1.2.6.1",
    "2.6.2": "Z-1-1.2.6.2",
    "2.8.0": "Z-1-1.2.8.0",
    "2.8.1": "Z-1-1.2.8.1",
    "2.8.2": "Z-1-1.2.8.2",
    "5.8.0": "Z-1-1.5.8.0",
    "5.8.1": "Z-1-1.5.8.1",
    "5.8.2": "Z-1-1.5.8.2",
    "6.8.0": "Z-1-1.6.8.0",
    "6.8.1": "Z-1-1.6.8.1",
    "6.8.2": "Z-1-1.6.8.2",
    "7.8.0": "Z-1-1.7.8.0",
    "7.8.1": "Z-1-1.7.8.1",
    "7.8.2": "Z-1-1.7.8.2",
    "8.8.0": "Z-1-1.8.8.0",
    "8.8.1": "Z-1-1.8.8.1",
    "8.8.2": "Z-1-1.8.8.2",
    "raw-1.2.1": "Z-1-1.1.2.1-raw",
    "raw-1.2.2": "Z-1-1.1.2.2-raw",
    "raw-1.6.1": "Z-1-1.1.6.1-raw",
    "raw-1.6.2": "Z-1-1.1.6.2-raw",
    "raw-1.8.0": "Z-1-1.1.8.0-raw",
    "raw-1.8.1": "Z-1-1.1.8.1-raw",
    "raw-1.8.2": "Z-1-1.1.8.2-raw",
    "raw-2.2.1": "Z-1-1.2.2.1-raw",
    "raw-2.2.2": "Z-1-1.2.2.2-raw",
    "raw-2.6.1": "Z-1-1.2.6.1-raw",
    "raw-2.6.2": "Z-1-1.2.6.2-raw",
    "raw-2.8.0": "Z-1-1.2.8.0-raw",
    "raw-2.8.1": "Z-1-1.2.8.1-raw",
    "raw-2.8.2": "Z-1-1.2.8.2-raw",
    "raw-5.8.0": "Z-1-1.5.8.0-raw",
    "raw-5.8.1": "Z-1-1.5.8.1-raw",
    "raw-5.8.2": "Z-1-1.5.8.2-raw",
    "raw-6.8.0": "Z-1-1.6.8.0-raw",
    "raw-6.8.1": "Z-1-1.6.8.1-raw",
    "raw-6.8.2": "Z-1-1.6.8.2-raw",
    "raw-7.8.0": "Z-1-1.7.8.0-raw",
    "raw-7.8.1": "Z-1-1.7.8.1-raw",
    "raw-7.8.2": "Z-1-1.7.8.2-raw",
    "raw-8.8.0": "Z-1-1.8.8.0-raw",
    "raw-8.8.1": "Z-1-1.8.8.1-raw",
    "raw-8.8.2": "Z-1-1.8.8.2-raw",
    "0.1.2": "Z-1-1.0.1.2",
    "1.6.1-time": "Z-1-1.1.6.1-time",
    "1.6.2-time": "Z-1-1.1.6.2-time",
    "2.6.1-time": "Z-1-1.2.6.1-time",
    "2.6.2-time": "Z-1-1.2.6.2-time",
    "P.200_Bit15": "Z-1-1.Bit15",
    "P.200_Bit14": "Z-1-1.Bit14",
    "P.200_Bit13": "Z-1-1.Bit13",
    "P.200_Bit12": "Z-1-1.Bit12",
    "P.200_Bit11": "Z-1-1.Bit11",
    "P.200_Bit10": "Z-1-1.Bit10",
    "P.200_Bit9": "Z-1-1.Bit9",
    "P.200_Bit8": "Z-1-1.Bit8",
    "P.200_Bit7": "Z-1-1.Bit7",
    "P.200_Bit6": "Z-1-1.Bit6",
    "P.200_Bit5": "Z-1-1.Bit5",
    "P.200_Bit4": "Z-1-1.Bit4",
    "P.200_Bit3": "Z-1-1.Bit3",
    "P.200_Bit2": "Z-1-1.Bit2",
    "P.200_Bit1": "Z-1-1.Bit1",
    "P.200_Bit0": "Z-1-1.Bit0",
    "2000": "Z-1-1.2000",
    "23A6": "Z-1-1.23A6",
    "234C": "Z-1-1.234C",
    "334C": "Z-1-1.334C",
    "234D": "Z-1-1.234D",
    "334D": "Z-1-1.334D",
    "234E": "Z-1-1.234E",
    "334E": "Z-1-1.334E",
    "0.9.1-value": "Z-1-1.0.9.1-value",
    "0.9.2-value": "Z-1-1.0.9.2-value",
    "0.9.1-trigger": "Z-1-1.0.9.1-trigger",
    "0.9.2-trigger": "Z-1-1.0.9.2-trigger"
}
# Zabbix item name -> name of the transform applied to the raw reading.
# Values are strings, not Python None: the literal string "None" means
# "no transform".
transform_set = {
    "positiveActiveDemand": "totalFactor",
    "negativeActiveDemand": "totalFactor",
    "reactiveDemandQ1": "totalFactor",
    "reactiveDemandQ2": "totalFactor",
    "reactiveDemandQ3": "totalFactor",
    "reactiveDemandQ4": "totalFactor",
    # "positiveActiveDemandBill": "totalFactor",
    # "negativeActiveDemandBill": "totalFactor",
    # "reactiveDemandQ1Bill": "totalFactor",
    # "reactiveDemandQ2Bill": "totalFactor",
    # "reactiveDemandQ3Bill": "totalFactor",
    # "reactiveDemandQ4Bill": "totalFactor",
    "Z-1-1.1.29.0": "totalFactor",
    "Z-1-1.2.29.0": "totalFactor",
    "Z-1-1.5.29.0": "totalFactor",
    "Z-1-1.6.29.0": "totalFactor",
    "Z-1-1.7.29.0": "totalFactor",
    "Z-1-1.8.29.0": "totalFactor",
    "Z-1-1.1.29.0-raw": "None",
    "Z-1-1.2.29.0-raw": "None",
    "Z-1-1.5.29.0-raw": "None",
    "Z-1-1.6.29.0-raw": "None",
    "Z-1-1.7.29.0-raw": "None",
    "Z-1-1.8.29.0-raw": "None",
    "Z-1-1.Log": "None",
    "Current time": "None",
    "Date": "None",
    "Device address 1": "None",
    "Identification number": "",  # NOTE(review): only entry with "" instead of "None" — confirm intentional
    "activePowerPhaseL1": "totalFactor",
    "activePowerPhaseL2": "totalFactor",
    "activePowerPhaseL3": "totalFactor",
    "totalPower": "totalFactor",
    "reactivePowerPhaseL1": "totalFactor",
    "reactivePowerPhaseL2": "totalFactor",
    "reactivePowerPhaseL3": "totalFactor",
    "reactivePowerTotal": "totalFactor",
    "voltagePhaseL1": "voltageRatio",
    "voltagePhaseL2": "voltageRatio",
    "voltagePhaseL3": "voltageRatio",
    "powerFactorPhaseL1": "None",
    "powerFactorPhaseL2": "None",
    "powerFactorPhaseL3": "None",
    "powerFactorAvg": "None",
    "Frequency": "None",
    "phaseFailuresTotal": "None",
    "phaseFailuresPhase1": "None",
    "phaseFailuresPhase2": "None",
    "phaseFailuresPhase3": "None",
    "apparentPowerPhaseL1": "totalFactor",
    "apparentPowerPhaseL2": "totalFactor",
    "apparentPowerPhaseL3": "totalFactor",
    "apparentPowerTotal": "totalFactor",
    "currentPhaseL1": "currentRatio",
    "currentPhaseL2": "currentRatio",
    "currentPhaseL3": "currentRatio",
    "CosinusPhi": "None",
    "TangensPhi": "None",
    "Z-1-1.1.2.1": "totalFactor",
    "Z-1-1.1.2.2": "totalFactor",
    "Z-1-1.1.6.1": "totalFactor",
    "Z-1-1.1.6.2": "totalFactor",
    "Z-1-1.1.8.0": "totalFactor",
    "Z-1-1.1.8.1": "totalFactor",
    "Z-1-1.1.8.2": "totalFactor",
    "Z-1-1.2.2.1": "totalFactor",
    "Z-1-1.2.2.2": "totalFactor",
    "Z-1-1.2.6.1": "totalFactor",
    "Z-1-1.2.6.2": "totalFactor",
    "Z-1-1.2.8.0": "totalFactor",
    "Z-1-1.2.8.1": "totalFactor",
    "Z-1-1.2.8.2": "totalFactor",
    "Z-1-1.5.8.0": "totalFactor",
    "Z-1-1.5.8.1": "totalFactor",
    "Z-1-1.5.8.2": "totalFactor",
    "Z-1-1.6.8.0": "totalFactor",
    "Z-1-1.6.8.1": "totalFactor",
    "Z-1-1.6.8.2": "totalFactor",
    "Z-1-1.7.8.0": "totalFactor",
    "Z-1-1.7.8.1": "totalFactor",
    "Z-1-1.7.8.2": "totalFactor",
    "Z-1-1.8.8.0": "totalFactor",
    "Z-1-1.8.8.1": "totalFactor",
    "Z-1-1.8.8.2": "totalFactor",
    "Z-1-1.1.2.1-raw": "None",
    "Z-1-1.1.2.2-raw": "None",
    "Z-1-1.1.6.1-raw": "None",
    "Z-1-1.1.6.2-raw": "None",
    "Z-1-1.1.8.0-raw": "None",
    "Z-1-1.1.8.1-raw": "None",
    "Z-1-1.1.8.2-raw": "None",
    "Z-1-1.2.2.1-raw": "None",
    "Z-1-1.2.2.2-raw": "None",
    "Z-1-1.2.6.1-raw": "None",
    "Z-1-1.2.6.2-raw": "None",
    "Z-1-1.2.8.0-raw": "None",
    "Z-1-1.2.8.1-raw": "None",
    "Z-1-1.2.8.2-raw": "None",
    "Z-1-1.5.8.0-raw": "None",
    "Z-1-1.5.8.1-raw": "None",
    "Z-1-1.5.8.2-raw": "None",
    "Z-1-1.6.8.0-raw": "None",
    "Z-1-1.6.8.1-raw": "None",
    "Z-1-1.6.8.2-raw": "None",
    "Z-1-1.7.8.0-raw": "None",
    "Z-1-1.7.8.1-raw": "None",
    "Z-1-1.7.8.2-raw": "None",
    "Z-1-1.8.8.0-raw": "None",
    "Z-1-1.8.8.1-raw": "None",
    "Z-1-1.8.8.2-raw": "None",
    "Z-1-1.0.1.2": "None",
    "Z-1-1.1.6.1-time": "None",
    "Z-1-1.1.6.2-time": "None",
    "Z-1-1.2.6.1-time": "None",
    "Z-1-1.2.6.2-time": "None",
    "Z-1-1.Bit15": "None",
    "Z-1-1.Bit14": "None",
    "Z-1-1.Bit13": "None",
    "Z-1-1.Bit12": "None",
    "Z-1-1.Bit11": "None",
    "Z-1-1.Bit10": "None",
    "Z-1-1.Bit9": "None",
    "Z-1-1.Bit8": "None",
    "Z-1-1.Bit7": "None",
    "Z-1-1.Bit6": "None",
    "Z-1-1.Bit5": "None",
    "Z-1-1.Bit4": "None",
    "Z-1-1.Bit3": "None",
    "Z-1-1.Bit2": "None",
    "Z-1-1.Bit1": "None",
    "Z-1-1.Bit0": "None",
    "Z-1-1.2000": "None",
    "Z-1-1.23A6": "None",
    "Z-1-1.234C": "None",
    "Z-1-1.334C": "None",
    "Z-1-1.234D": "None",
    "Z-1-1.334D": "None",
    "Z-1-1.234E": "None",
    "Z-1-1.334E": "None",
    "Z-1-1.0.9.1-value": "None",
    "Z-1-1.0.9.2-value": "None",
    "Z-1-1.0.9.1-trigger": "None",
    "Z-1-1.0.9.2-trigger": "None",
    "Z-1-1.1.8.0-bill": "totalFactor",
    "Z-1-1.2.8.0-bill": "totalFactor",
    "Z-1-1.5.8.0-bill": "totalFactor",
    "Z-1-1.6.8.0-bill": "totalFactor",
    "Z-1-1.7.8.0-bill": "totalFactor",
    "Z-1-1.8.8.0-bill": "totalFactor",
    "Z-1-1.1.8.0-bill-raw": "None",
    "Z-1-1.2.8.0-bill-raw": "None",
    "Z-1-1.5.8.0-bill-raw": "None",
    "Z-1-1.6.8.0-bill-raw": "None",
    "Z-1-1.7.8.0-bill-raw": "None",
    "Z-1-1.8.8.0-bill-raw": "None"
}
| 49.676243
| 93
| 0.606491
| 7,569
| 44,957
| 3.599154
| 0.038711
| 0.019308
| 0.024227
| 0.018244
| 0.878496
| 0.842596
| 0.743117
| 0.602269
| 0.477057
| 0.382314
| 0
| 0.109512
| 0.196276
| 44,957
| 904
| 94
| 49.731195
| 0.644369
| 0.014881
| 0
| 0.234763
| 0
| 0
| 0.753717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022573
| 0
| 0.022573
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8994253b00d6e5d27e65563ed1f3b8a7b6074e7a
| 56
|
py
|
Python
|
hms_kivy/rfid/__init__.py
|
NottingHack/hms-kivy
|
38f0047517be15099f2f34b73f6aa43902de7c85
|
[
"MIT"
] | 1
|
2021-12-17T04:24:22.000Z
|
2021-12-17T04:24:22.000Z
|
hms_kivy/rfid/__init__.py
|
NottingHack/hms-kivy
|
38f0047517be15099f2f34b73f6aa43902de7c85
|
[
"MIT"
] | null | null | null |
hms_kivy/rfid/__init__.py
|
NottingHack/hms-kivy
|
38f0047517be15099f2f34b73f6aa43902de7c85
|
[
"MIT"
] | null | null | null |
"""
RFID
===============
"""
# from .rfid import RFID
| 7
| 24
| 0.392857
| 5
| 56
| 4.4
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196429
| 56
| 7
| 25
| 8
| 0.488889
| 0.785714
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
899aac445ad00336b2aa58754d79698b04eca293
| 426
|
py
|
Python
|
diff/__init__.py
|
treebohotels/diff-and-patch
|
497c078ea5c1bc6caa361f14eb4c6206d84a5d24
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-01-01T15:34:30.000Z
|
2020-01-01T15:34:30.000Z
|
diff/__init__.py
|
treebohotels/diff-and-patch
|
497c078ea5c1bc6caa361f14eb4c6206d84a5d24
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
diff/__init__.py
|
treebohotels/diff-and-patch
|
497c078ea5c1bc6caa361f14eb4c6206d84a5d24
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
from diff.diff_strategy.base_diff_item import BaseDiffItem
from diff.diff_strategy.integer_diff import IntegerDiff
from diff.diff_strategy.string_diff import StringDiff
from diff.diff_strategy.base_diff_strategy import BaseDiffStrategy
from diff.differ import Differ
from diff.patcher import Patcher
from diff.patch_behaviours import BasePatchBehaviour
from diff.patch_behaviours import BaseBehaviour
| 38.727273
| 66
| 0.861502
| 59
| 426
| 6.016949
| 0.355932
| 0.180282
| 0.135211
| 0.225352
| 0.321127
| 0.157746
| 0
| 0
| 0
| 0
| 0
| 0.002577
| 0.089202
| 426
| 10
| 67
| 42.6
| 0.912371
| 0.049296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
89ab07a525358131a9c0e7f84a7b69337a2f2cca
| 104
|
py
|
Python
|
models/continent.py
|
morival/W04_project_TBL
|
a116bbda72bf61c55752fe1f4fdce2685ae0d024
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
models/continent.py
|
morival/W04_project_TBL
|
a116bbda72bf61c55752fe1f4fdce2685ae0d024
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
models/continent.py
|
morival/W04_project_TBL
|
a116bbda72bf61c55752fe1f4fdce2685ae0d024
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
class Continent:
    """Lightweight value object for a continent.

    ``id`` is optional and defaults to None (e.g. before the record has
    been assigned a database id).
    """

    def __init__(self, name, id=None):
        self.name = name
        self.id = id
| 20.8
| 40
| 0.567308
| 14
| 104
| 3.928571
| 0.571429
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.326923
| 104
| 5
| 41
| 20.8
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
89b3b953ae664601d77ea8396659d68d4f2330e8
| 52
|
py
|
Python
|
probez/file_handling/file_handling_exceptions.py
|
Sepidak/spikeGUI
|
25ae60160308c0a34e7180f3e39a1c4dc6aad708
|
[
"MIT"
] | null | null | null |
probez/file_handling/file_handling_exceptions.py
|
Sepidak/spikeGUI
|
25ae60160308c0a34e7180f3e39a1c4dc6aad708
|
[
"MIT"
] | 3
|
2021-08-09T21:51:41.000Z
|
2021-08-09T21:51:45.000Z
|
probez/file_handling/file_handling_exceptions.py
|
Sepidak/spikeGUI
|
25ae60160308c0a34e7180f3e39a1c4dc6aad708
|
[
"MIT"
] | 3
|
2021-10-16T14:07:59.000Z
|
2021-10-16T17:09:03.000Z
|
class InconsistentNChanError(Exception):
    """Signals an inconsistent channel count (NChan) during file handling."""
| 17.333333
| 41
| 0.769231
| 4
| 52
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173077
| 52
| 2
| 42
| 26
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
984d7d175e90b6c00cda2c880cd9eb06bf678508
| 370
|
py
|
Python
|
awards/serializers.py
|
kilonzijnr/awards
|
d2ec991de8f161b88ae85f6520c1702ead21291e
|
[
"MIT"
] | null | null | null |
awards/serializers.py
|
kilonzijnr/awards
|
d2ec991de8f161b88ae85f6520c1702ead21291e
|
[
"MIT"
] | null | null | null |
awards/serializers.py
|
kilonzijnr/awards
|
d2ec991de8f161b88ae85f6520c1702ead21291e
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Profile,Project
class ProfileSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Profile's ``name`` and ``bio`` fields."""

    class Meta:
        model = Profile
        fields = ('name','bio')
class ProjectSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Project's site name, link, content and design fields."""

    class Meta:
        model = Project
        fields=('sitename','link','content', 'design')
| 24.666667
| 54
| 0.667568
| 34
| 370
| 7.235294
| 0.617647
| 0.211382
| 0.252033
| 0.284553
| 0.325203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235135
| 370
| 15
| 54
| 24.666667
| 0.869258
| 0
| 0
| 0.2
| 0
| 0
| 0.086253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
984dd04ab5d04ac8ea4c9837a4fad414a731b448
| 98
|
py
|
Python
|
django_adminform/apps.py
|
humanscape-covy/django-jsonform
|
ddf2bf40022855d4969988e13a0a3db7abb8a365
|
[
"BSD-3-Clause"
] | null | null | null |
django_adminform/apps.py
|
humanscape-covy/django-jsonform
|
ddf2bf40022855d4969988e13a0a3db7abb8a365
|
[
"BSD-3-Clause"
] | null | null | null |
django_adminform/apps.py
|
humanscape-covy/django-jsonform
|
ddf2bf40022855d4969988e13a0a3db7abb8a365
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class JsonappConfig(AppConfig):
    """Django app configuration registering this package as 'django_adminform'."""

    name = 'django_adminform'
| 16.333333
| 33
| 0.77551
| 11
| 98
| 6.818182
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 5
| 34
| 19.6
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
986e9a1940b22c7bcff1ec87b0c917126cf577b9
| 135
|
py
|
Python
|
rules_default/castervoice/lib/ctrl/mgr/errors/tree_rule_config_error.py
|
MLH-Fellowship/LarynxCode
|
840fee18c689a357052825607c27fc8e3e56571c
|
[
"MIT"
] | 1
|
2021-09-17T06:11:02.000Z
|
2021-09-17T06:11:02.000Z
|
rules_default/castervoice/lib/ctrl/mgr/errors/tree_rule_config_error.py
|
soma2000-lang/LarynxCode
|
840fee18c689a357052825607c27fc8e3e56571c
|
[
"MIT"
] | 5
|
2021-02-03T05:29:41.000Z
|
2021-02-08T01:14:11.000Z
|
rules_default/castervoice/lib/ctrl/mgr/errors/tree_rule_config_error.py
|
soma2000-lang/LarynxCode
|
840fee18c689a357052825607c27fc8e3e56571c
|
[
"MIT"
] | 4
|
2021-02-03T05:05:00.000Z
|
2021-07-14T06:21:10.000Z
|
class TreeRuleConfigurationError(Exception):
    """Error type raised for tree rule configuration problems."""

    def __init__(self, msg):
        # Explicit base-class delegation; equivalent to the original
        # two-argument super(...) call for this direct Exception subclass.
        Exception.__init__(self, msg)
| 45
| 61
| 0.77037
| 12
| 135
| 8
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 135
| 3
| 61
| 45
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
98852ee7adf518e9d53b96045eb40fd60d4b87be
| 982
|
py
|
Python
|
pyengy/error/node_error.py
|
FreNeS1/PyEngy-2d
|
771112530ae039e8921369f700ba3b66c3df50c1
|
[
"MIT"
] | 1
|
2020-07-09T12:42:30.000Z
|
2020-07-09T12:42:30.000Z
|
pyengy/error/node_error.py
|
FreNeS1/PyEngy-2d
|
771112530ae039e8921369f700ba3b66c3df50c1
|
[
"MIT"
] | null | null | null |
pyengy/error/node_error.py
|
FreNeS1/PyEngy-2d
|
771112530ae039e8921369f700ba3b66c3df50c1
|
[
"MIT"
] | null | null | null |
"""Contains the NodeError class."""
from __future__ import annotations
from typing import List, Optional
from .pyengy_error import PyEngyError
class NodeError(PyEngyError):
    """Raised when a basic interaction with a node cannot be completed.

    For example, cyclic node dependencies.
    """

    def __init__(self, node: str, message: str, caused_by: Optional[List[Exception]] = None) -> None:
        """Create a new NodeError.

        :param node: Valid identifier of the node that raised the error.
        :param message: Human readable description of the error.
        :param caused_by: List of exceptions that caused this one to be raised, if any.
        """
        super().__init__(message, caused_by)
        # Identifier of the offending node — usually its string representation.
        self.node = node

    def _error_string(self):
        # self.message is expected to be provided by the PyEngyError base class.
        return "NodeError for node ({}): {}.".format(self.node, self.message)
| 35.071429
| 119
| 0.687373
| 127
| 982
| 5.173228
| 0.464567
| 0.030441
| 0.041096
| 0.063927
| 0.14003
| 0.14003
| 0.14003
| 0.14003
| 0.14003
| 0.14003
| 0
| 0
| 0.221996
| 982
| 27
| 120
| 36.37037
| 0.859948
| 0.378819
| 0
| 0
| 0
| 0
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.333333
| 0.111111
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
9891700284763a3f19380f342bc7448060c6da88
| 3,214
|
py
|
Python
|
sauron/parsers.py
|
luebbert42/sauron-engine
|
9d67ecb8254544ec7ac16fbb80b33edc7a9758e3
|
[
"MIT"
] | 30
|
2019-12-17T09:59:15.000Z
|
2021-07-14T20:09:52.000Z
|
sauron/parsers.py
|
luebbert42/sauron-engine
|
9d67ecb8254544ec7ac16fbb80b33edc7a9758e3
|
[
"MIT"
] | 367
|
2020-02-24T17:28:25.000Z
|
2022-03-15T15:47:46.000Z
|
sauron/parsers.py
|
luebbert42/sauron-engine
|
9d67ecb8254544ec7ac16fbb80b33edc7a9758e3
|
[
"MIT"
] | 5
|
2020-04-15T10:14:55.000Z
|
2021-12-21T07:49:06.000Z
|
import json
from json.decoder import JSONDecodeError
from typing import List, Type, Union, Dict, Any
from sauron.models import JobModel
from ruamel.yaml import YAML
class DefaultParser:
    """Parses job definitions (a list of dicts or a JSON/YAML string) into JobModel objects."""

    # Model class instantiated for every parsed job entry; subclasses may override.
    single_model: Type[JobModel] = JobModel

    def __init__(self):
        # Safe-mode YAML loader; JSON is a subset of YAML, so JSON strings parse too.
        self.yaml = YAML(typ="safe")

    def _parse_single_job(self, job_dict) -> JobModel:
        """
        Parse a single job dictionary into a ``single_model`` instance.
        """
        return self.single_model(**job_dict)

    def _parse_jobs_from_list(self, jobs_input) -> List[JobModel]:
        """
        Parse a list of job dictionaries into a list of models.
        """
        parsed_jobs: List = []
        for raw_job in jobs_input:
            current_job: JobModel = self._parse_single_job(raw_job)
            parsed_jobs.append(current_job)
        return parsed_jobs

    def _parse_jobs_from_string(self, jobs_input) -> List[JobModel]:
        """
        Parse a json-string containing the list of jobs.
        """
        try:
            jobs: list = self.yaml.load(jobs_input)
        # NOTE(review): YAML(typ="safe").load raises ruamel.yaml errors, not
        # json.JSONDecodeError — confirm this handler can actually fire.
        except JSONDecodeError:
            raise ValueError("jobs param is not a valid json string")
        else:
            return self._parse_jobs_from_list(jobs)

    def parse(self, jobs_input) -> List[JobModel]:
        """
        Main entry point: accepts a list or a json-string of jobs.

        :raises ValueError: if ``jobs_input`` is neither a list nor a string.
        """
        jobs_list_data: List[JobModel] = []
        if isinstance(jobs_input, str):
            jobs_list_data = self._parse_jobs_from_string(jobs_input)
        elif isinstance(jobs_input, list):
            # jobs_input is a python list
            jobs_list_data = self._parse_jobs_from_list(jobs_input)
        else:
            raise ValueError("jobs param must be a list or json-string")
        return jobs_list_data
class RuleEngineParser(DefaultParser):
    """DefaultParser variant whose string input holds "conditions" and "actions" lists."""

    single_model: Type[JobModel] = JobModel

    def __init__(self):
        # Safe-mode YAML loader; JSON is a subset of YAML, so JSON strings parse too.
        self.yaml = YAML(typ="safe")

    def _parse_jobs_from_string(self, jobs_input: str) -> List[JobModel]:
        """
        Parse a json-string whose payload contains "conditions" and "actions" keys;
        the two lists are concatenated (conditions first) into one job list.
        """
        try:
            decoded_jobs: dict = self.yaml.load(jobs_input)
            jobs: list = decoded_jobs["conditions"] + decoded_jobs["actions"]
        # NOTE(review): YAML(typ="safe").load raises ruamel.yaml errors, not
        # json.JSONDecodeError; KeyError from the lookups above is also uncaught.
        except JSONDecodeError:
            raise ValueError("jobs param is not a valid json string")
        else:
            return self._parse_jobs_from_list(jobs)

    def parse(self, jobs_input: Union[List, str]) -> List[JobModel]:
        """
        Main entry point: accepts a list, dict, or json-string of jobs.

        :raises ValueError: if ``jobs_input`` is none of the accepted types.
        """
        jobs_list_data: List[JobModel] = []
        if isinstance(jobs_input, str):
            jobs_list_data = self._parse_jobs_from_string(jobs_input)
        elif isinstance(jobs_input, list):
            # jobs_input is a python list
            jobs_list_data = self._parse_jobs_from_list(jobs_input)
        elif isinstance(jobs_input, dict):
            # jobs_input is a python dict; str() yields its repr, which is fed
            # back through the YAML loader — NOTE(review): confirm this round-trip
            # is valid for all expected payloads.
            jobs_list_data = self._parse_jobs_from_string(str(jobs_input))
        else:
            raise ValueError("jobs param must be a list or json-string")
        return jobs_list_data
| 34.934783
| 77
| 0.631612
| 419
| 3,214
| 4.587112
| 0.176611
| 0.098335
| 0.067638
| 0.061915
| 0.761186
| 0.739334
| 0.727367
| 0.727367
| 0.681582
| 0.681582
| 0
| 0
| 0.288115
| 3,214
| 91
| 78
| 35.318681
| 0.840035
| 0.13939
| 0
| 0.571429
| 0
| 0
| 0.068609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.089286
| 0
| 0.410714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
989a51d09054c4e781530d27242fa96e3092af04
| 1,099
|
py
|
Python
|
libotp/nametag/MarginPopup.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 1
|
2019-11-23T21:54:23.000Z
|
2019-11-23T21:54:23.000Z
|
libotp/nametag/MarginPopup.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 1
|
2021-06-08T17:16:48.000Z
|
2021-06-08T17:16:48.000Z
|
libotp/nametag/MarginPopup.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 3
|
2021-06-03T05:36:36.000Z
|
2021-06-22T15:07:31.000Z
|
from panda3d.core import *
import NametagGlobals
class MarginPopup(PandaNode):
    """Base popup node for the nametag margin system (PandaNode subclass)."""

    def __init__(self):
        PandaNode.__init__(self, 'MarginPopup')
        self.m_managed = False  # True while owned by a margin manager
        self.m_visible = False
        self.m_np = None  # NodePath wrapper; only set while managed
        self.m_cell_width = 1.0
        # Snapshot of the global margin property sequence, used to detect changes.
        self.m_seq = NametagGlobals._margin_prop_seq

    def getCellWidth(self):
        return self.m_cell_width

    def setManaged(self, value):
        # Entering managed state materializes a NodePath for this node;
        # leaving it drops the reference.
        self.m_managed = value
        if value:
            self.m_np = NodePath.anyPath(self)
        else:
            self.m_np = None

    def isManaged(self):
        return self.m_managed

    def setVisible(self, value):
        self.m_visible = value

    def isVisible(self):
        return self.m_visible

    def getScore(self):
        # Presumably a priority score used by the margin manager — TODO confirm.
        return 0.0

    def getObjectCode(self):
        return 0

    def considerVisible(self):
        # Refresh contents only when the global margin property sequence changed.
        if self.m_seq != NametagGlobals._margin_prop_seq:
            self.m_seq = NametagGlobals._margin_prop_seq
            self.updateContents()

    def updateContents(self):
        # Hook for subclasses; base implementation does nothing.
        pass

    def frameCallback(self):
        # Per-frame hook for subclasses; base implementation does nothing.
        pass
| 21.134615
| 57
| 0.616924
| 131
| 1,099
| 4.923664
| 0.312977
| 0.108527
| 0.055814
| 0.102326
| 0.168992
| 0.168992
| 0.168992
| 0.114729
| 0
| 0
| 0
| 0.007853
| 0.304823
| 1,099
| 51
| 58
| 21.54902
| 0.836387
| 0
| 0
| 0.166667
| 0
| 0
| 0.010009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.305556
| false
| 0.055556
| 0.055556
| 0.138889
| 0.527778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
989f890f3cd119ce24bac176059068606afe256e
| 29,932
|
py
|
Python
|
source/plotting/create_plots.py
|
ml-jku/OfflineRL
|
d407457aba144587ce58fc47f4e8ae6099356f03
|
[
"MIT"
] | 6
|
2021-11-30T09:41:54.000Z
|
2022-03-29T18:15:02.000Z
|
source/plotting/create_plots.py
|
kschweig/OfflineRL
|
d407457aba144587ce58fc47f4e8ae6099356f03
|
[
"MIT"
] | null | null | null |
source/plotting/create_plots.py
|
kschweig/OfflineRL
|
d407457aba144587ce58fc47f4e8ae6099356f03
|
[
"MIT"
] | 2
|
2021-11-04T16:47:59.000Z
|
2022-02-15T14:30:21.000Z
|
import os
import glob
import scipy
import pickle
import numpy as np
from source.offline_ds_evaluation.metrics_manager import MetricsManager
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.colors import Normalize
# Turn interactive plotting off
plt.ioff()
import seaborn as sns
sns.set()
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
# Output folder / image format for the generated figures.
folder = "main_figures_paper"
image_type = "pdf"
"""
folder = "main_figures"
image_type = "png"
"""
# Figure sizes (inches) for the various layout variants used below.
figsize = (12, 6)
figsize_legend = (12, 1)
figsize_half = (12, 3.5)
figsize_half_half = (9.25, 3.5)
figsize_small_avg = (9, 3.2)
figsize_small = (16, 3)
figsize_comp = (12, 6)
figsize_envs = (12, 7.2)
figsize_theplot = (13, 12)
figsize_thesmallplot = (9, 8)
# metric manager
experiments = ["ex1", "ex2", "ex3", "ex4", "ex5", "ex6"]
mm = MetricsManager(0)
useruns = 5
# Merge every per-experiment, per-run metrics pickle into the single manager.
for ex in experiments:
    for userun in range(1, 6):
        paths = glob.glob(os.path.join("..", "..", "data", ex, f"metrics_*_run{userun}.pkl"))
        for path in paths:
            with open(path, "rb") as f:
                # NOTE: pickle.load is only safe on trusted, locally produced files.
                m = pickle.load(f)
                m.recode(userun)
                mm.data.update(m.data)
# static stuff
envs = {'CartPole-v1': 0, 'MountainCar-v0': 1, "MiniGrid-LavaGapS7-v0": 2, "MiniGrid-Dynamic-Obstacles-8x8-v0": 3,
        'Breakout-MinAtar-v0': 4, "SpaceInvaders-MinAtar-v0": 5}
algos = ["BC", "BVE", "MCE", "DQN", "QRDQN", "REM", "BCQ", "CQL", "CRR"]
buffer = {"random": "Random", "mixed": "Mixed", "er": "Replay", "noisy": "Noisy", "fully": "Expert"}
mc_actions = ["Acc. to the Left", "Don't accelerate", "Acc. to the Right"]
def plt_csv(ax, csv, algo, mode, ylims=None, set_title=True, color=None, set_label=True):
    """Plot mean +/- one std of the runs in `csv` onto `ax`.

    :param ax: matplotlib Axes to draw on.
    :param csv: 2-D array of results; axis 1 holds the runs (mean/std taken over it).
    :param algo: algorithm name used as the line label (when set_label is True).
    :param mode: dataset key looked up in the module-level `buffer` mapping for the title.
    :param ylims: optional (bottom, top) y-axis limits.
    :param set_title: whether to set the subplot title from `buffer[mode]`.
    :param color: line/fill color passed through to matplotlib.
    :param set_label: whether to attach `algo` as the legend label.
    """
    est = np.mean(csv, axis=1)
    sd = np.std(csv, axis=1)
    cis = (est - sd, est + sd)
    # One data point every 100 steps; compute the x axis once instead of twice.
    xs = np.arange(0, len(est) * 100, 100)
    ax.fill_between(xs, cis[0], cis[1], alpha=0.2, color=color)
    ax.plot(xs, est, label=(algo if set_label else None), color=color)
    ax.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
    if set_title:
        ax.set_title(buffer[mode])
    if ylims is not None:  # fixed: identity check instead of `!= None`
        ax.set_ylim(bottom=ylims[0], top=ylims[1])
## get data
indir = os.path.join("..", "..", "results", "csv_per_userun", "return")
files = []
for file in glob.iglob(os.path.join(indir, "**", "*.csv"), recursive=True):
files.append(file)
data = dict()
for file in files:
name = file.split("/")[-1]
userun = int(file.split("/")[-2][-1])
env = file.split("/")[-3]
algo = name.split("_")[-2]
mode = name.split("_")[-1].split(".")[0]
try:
csv = np.loadtxt(file, delimiter=";")
except:
print("Error in ", env, mode, algo)
if len(csv.shape) == 1:
csv = csv.reshape(-1, 1)
if not data.keys() or env not in data.keys():
data[env] = dict()
if not data[env].keys() or userun not in data[env].keys():
data[env][userun] = dict()
if not data[env][userun].keys() or mode not in data[env][userun].keys():
data[env][userun][mode] = dict()
data[env][userun][mode][algo] = csv
###############
# plot metrics for policies
###############
modes = list(buffer.keys())
outdir = os.path.join("..", "..", "results", folder, "metrics")
os.makedirs(outdir, exist_ok=True)
# titles
x_label = "Dataset"
# compact representation averaged over envs
f, axs = plt.subplots(1, 3, figsize=figsize_half, sharex=True)
for m, metric in enumerate([(0, 0), 2, (3, 0)]):
x_all = []
for mode in modes:
x = []
for e, env in enumerate(envs):
for userun in range(1, useruns + 1):
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
if m == 1:
result = mm.get_data(env, mode, userun)[metric]
else:
result = mm.get_data(env, mode, userun)[metric[0]][metric[1]]
if m == 0:
csv = data[env][userun]["online"]["DQN"]
x.append((result - random_return) / (np.max(csv) - random_return))
elif m == 1:
x.append(result / online_usap)
else:
x.append(result)
x_all.append(x)
axs[m].boxplot(x_all, positions=range(len(modes)), zorder=20,
medianprops={"c": f"darkcyan", "linewidth": 1.5},
boxprops={"c": f"darkcyan", "linewidth": 1.5},
whiskerprops={"c": f"darkcyan", "linewidth": 1.5},
capprops={"c": f"darkcyan", "linewidth": 1.5},
flierprops={"markeredgecolor": f"darkcyan"})#, "markeredgewidth": 1.5})
if m == 0:
axs[m].set_ylabel("Relative Trajectory Quality")
axs[m].axhline(y=1, color="silver")
elif m == 1:
axs[m].set_ylabel("Relative State-Action Coverage")
axs[m].axhline(y=1, color="silver")
elif m == 2:
axs[m].set_ylabel("Entropy")
axs[m].set_ylim(bottom=-0.05, top=1.45)
axs[m].set_xticks([i for i in range(len(modes))])
axs[m].set_xticklabels([buffer[m] for m in modes],fontsize="small")#, rotation=15, rotation_mode="anchor")
axs[-1].set_ylim(bottom=-0.05, top=1.05)
f.tight_layout(rect=(0, 0.022, 1, 1))
f.text(0.52, 0.01, x_label, ha='center')
plt.savefig(os.path.join(outdir, f"overview_3_avg." + image_type))
plt.close()
# compact representation averaged over envs
# Two box plots (relative trajectory quality, relative state-action coverage),
# pooled over every environment and userun, one box per dataset-generation mode.
# NOTE(review): indentation was lost in extraction; structure inferred from the
# sibling sections below.
f, axs = plt.subplots(1, 2, figsize=figsize_small_avg, sharex=True)
for m, metric in enumerate([(0, 0), 2]):
x_all = []
for mode in modes:
x = []
for e, env in enumerate(envs):
for userun in range(1, useruns + 1):
# Baselines: random-policy return and online state-action coverage.
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
# metric is a plain index (2) for m == 1, a (row, col) tuple otherwise.
if m == 1:
result = mm.get_data(env, mode, userun)[metric]
else:
result = mm.get_data(env, mode, userun)[metric[0]][metric[1]]
if m == 0:
# Scale return into [0, 1] between random and best online DQN return.
csv = data[env][userun]["online"]["DQN"]
x.append((result - random_return) / (np.max(csv) - random_return))
elif m == 1:
x.append(result / online_usap)
x_all.append(x)
axs[m].boxplot(x_all, positions=range(len(modes)), zorder=20,
medianprops={"c": "darkcyan", "linewidth": 1.5},
boxprops={"c": "darkcyan", "linewidth": 1.5},
whiskerprops={"c": "darkcyan", "linewidth": 1.5},
capprops={"c": "darkcyan", "linewidth": 1.5},
flierprops={"markeredgecolor": "darkcyan"})#, "markeredgewidth": 1.5})
if m == 0:
axs[m].set_ylabel("Relative Trajectory Quality")
axs[m].axhline(y=1, color="silver")
elif m == 1:
axs[m].set_ylabel("Relative State-Action Coverage")
axs[m].axhline(y=1, color="silver")
axs[m].set_ylim(bottom=-0.05, top=1.45)
axs[m].set_xticks([i for i in range(len(modes))])
# NOTE(review): the comprehension variable m shadows the outer loop name only
# inside the comprehension scope (Py3); confusing but harmless.
axs[m].set_xticklabels([buffer[m] for m in modes],fontsize="small")#, rotation=15, rotation_mode="anchor")
f.tight_layout(rect=(0, 0.022, 1, 1))
f.text(0.52, 0.01, x_label, ha='center')
plt.savefig(os.path.join(outdir, f"overview_2_avg." + image_type))
plt.close()
# compact representation
# Per-environment box plots of quality / coverage / entropy (3 panels),
# one color (C0, C1, ...) per environment, grouped side by side per mode.
f, axs = plt.subplots(1, 3, figsize=figsize_half, sharex=True)
for m, metric in enumerate([(0, 0), 2, (3, 0)]):
for e, env in enumerate(envs):
x_all = []
for mode in modes:
x = []
for userun in range(1, useruns + 1):
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
if m == 1:
result = mm.get_data(env, mode, userun)[metric]
else:
result = mm.get_data(env, mode, userun)[metric[0]][metric[1]]
if m == 0:
csv = data[env][userun]["online"]["DQN"]
x.append((result - random_return) / (np.max(csv) - random_return))
elif m == 1:
x.append(result / online_usap)
else:
x.append(result)
x_all.append(x)
# Offset each environment's boxes so they sit side by side within a mode slot.
pos = [0.2 + 0.12 * e + m_ for m_ in range(len(modes))]
axs[m].boxplot(x_all, positions=pos, widths=0.1, sym="", zorder=20,
medianprops={"c": f"C{e}", "linewidth": 1.5},
boxprops={"color": f"C{e}", "linewidth": 1.5},
whiskerprops={"color": f"C{e}", "linewidth": 1.5},
capprops={"color": f"C{e}", "linewidth": 1.5},
flierprops={"color": f"C{e}", "linewidth": 1.5})
if m == 0:
axs[m].set_ylabel("Relative Trajectory Quality")
axs[m].axhline(y=1, color="silver")
elif m == 1:
axs[m].set_ylabel("Relative State-Action Coverage")
axs[m].axhline(y=1, color="silver")
elif m == 2:
axs[m].set_ylabel("Entropy")
axs[m].set_ylim(bottom=-0.05, top=1.45)
axs[m].set_xticks([i for i in range(len(modes) + 1)])
names = [buffer[m] for m in modes]
names.append("")
axs[m].set_xticklabels(names,fontsize="small")#, rotation=15, rotation_mode="anchor")
# Nudge tick labels right so they center under the grouped boxes.
offset = matplotlib.transforms.ScaledTranslation(0.29, 0, f.dpi_scale_trans)
for label in axs[m].xaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset)
axs[-1].set_ylim(bottom=-0.05, top=1.05)
labels = [mpatches.Patch(color=f"C{e_}", fill=False, linewidth=1.5, label="-".join(env_.split("-")[:-1])) for e_, env_ in enumerate(envs)]
f.legend(handles=labels, handlelength=1, loc="upper center", ncol=len(envs), fontsize="small")
f.tight_layout(rect=(0, 0.022, 1, 0.92))
f.text(0.52, 0.01, x_label, ha='center')
plt.savefig(os.path.join(outdir, f"overview_3." + image_type))
plt.close()
# compact representation
# Same per-environment grouped box plot as overview_3, but with only the
# quality and coverage panels (no entropy).
f, axs = plt.subplots(1, 2, figsize=figsize_half_half, sharex=True)
for m, metric in enumerate([(0, 0), 2]):
for e, env in enumerate(envs):
x_all = []
for mode in modes:
x = []
for userun in range(1, useruns + 1):
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
if m == 1:
result = mm.get_data(env, mode, userun)[metric]
else:
result = mm.get_data(env, mode, userun)[metric[0]][metric[1]]
if m == 0:
csv = data[env][userun]["online"]["DQN"]
x.append((result - random_return) / (np.max(csv) - random_return))
elif m == 1:
x.append(result / online_usap)
else:
x.append(result)
x_all.append(x)
pos = [0.2 + 0.12 * e + m_ for m_ in range(len(modes))]
axs[m].boxplot(x_all, positions=pos, widths=0.1, sym="", zorder=20,
medianprops={"c": f"C{e}", "linewidth": 1.5},
boxprops={"color": f"C{e}", "linewidth": 1.5},
whiskerprops={"color": f"C{e}", "linewidth": 1.5},
capprops={"color": f"C{e}", "linewidth": 1.5},
flierprops={"color": f"C{e}", "linewidth": 1.5})
#axs[m].plot(range(len(x)), x, "-o", label = "-".join(env.split("-")[:-1]) if m == 0 else None, zorder=20)
if m == 0:
axs[m].set_ylabel("Relative Trajectory Quality")
axs[m].axhline(y=1, color="silver")
elif m == 1:
axs[m].set_ylabel("Relative State-Action Coverage")
axs[m].axhline(y=1, color="silver")
axs[m].set_ylim(bottom=-0.05, top=1.45)
axs[m].set_xticks([i for i in range(len(modes) + 1)])
names = [buffer[m] for m in modes]
names.append("")
axs[m].set_xticklabels(names, fontsize="small")#, rotation=15, rotation_mode="anchor")
offset = matplotlib.transforms.ScaledTranslation(0.33, 0, f.dpi_scale_trans)
for label in axs[m].xaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset)
labels = [mpatches.Patch(color=f"C{e_}", fill=False, linewidth=1.5, label="-".join(env_.split("-")[:-1])) for e_, env_ in enumerate(envs)]
f.legend(handles=labels, handlelength=1, loc="upper center", ncol=len(envs), fontsize="x-small")
f.tight_layout(rect=(0, 0.022, 1, 0.92))
f.text(0.52, 0.01, x_label, ha='center')
plt.savefig(os.path.join(outdir, f"overview_2." + image_type))
plt.close()
###############
# Main Results
###############
# Scatter of dataset coverage (x) vs trajectory quality (y), colored by the
# algorithm's final performance; one panel per algorithm, repeated for
# all envs / non-MinAtar envs / MinAtar envs.
outdir = os.path.join("..", "..", "results", folder, "tq_vs_sac")
os.makedirs(outdir, exist_ok=True)
from matplotlib.colors import LinearSegmentedColormap
#c = ["seagreen", "darkcyan", ""]#["red", "tomato", "lightsalmon", "wheat", "palegreen", "limegreen", "green"]
#v = [i / (len(c) - 1) for i in range(len(c))]
#print(v)
#l = list(zip(v, c))
cmap = "viridis" #LinearSegmentedColormap.from_list('grnylw',l, N=256)
# Performance color scale: 0-120 % of the online policy, clipped.
normalize = Normalize(vmin=0, vmax=120, clip=True)
offset_ann = 0.025
# titles
x_label = r"Relative $\bf{{State}{-}{Action} \; Coverage}$ of Dataset"
y_label = r"Relative $\bf{Trajectory \; Quality}$ of Dataset"
# plot for discussion
### algos not averaged
types = ["all", "noMinAtar", "MinAtar"]
for t, environments in enumerate([list(envs), list(envs)[:4], list(envs)[4:]]):
if t == 2:
# MinAtar subset: only four algorithms are plotted, on a 2x2 grid.
f, axs = plt.subplots(2, 2, figsize=(figsize_thesmallplot[0], figsize_thesmallplot[1]), sharex=True, sharey=True)
axs = [item for sublist in zip(axs[:, 0], axs[:, 1]) for item in sublist]
algos_ = ["BC", "DQN", "BCQ", "CQL"]
else:
f, axs = plt.subplots(3, 3, figsize=(figsize_theplot[0], figsize_theplot[1]), sharex=True, sharey=True)
axs = [item for sublist in zip(axs[:, 0], axs[:, 1], axs[:, 2]) for item in sublist]
algos_ = algos
for a, algo in enumerate(algos_):
ax = axs[a]
ax.axhline(y=1, color="silver")
ax.axvline(x=1, color="silver")
ax.set_title(algo, fontsize="large")
x, y, performance = [], [], []
for e, env in enumerate(list(environments)):
for userun in range(1, useruns + 1):
online_return = np.max(data[env][userun]["online"]["DQN"])
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
for m, mode in enumerate(modes):
try:
performance.append((np.max(np.mean(data[env][userun][mode][algo], axis=1)) - random_return) / (
online_return - random_return) * 100)
x.append(mm.get_data(env, mode, userun)[2] / online_usap)
y.append((mm.get_data(env, mode, userun)[0][0] - random_return) / (online_return - random_return))
except:
# NOTE(review): bare except silently skips missing env/mode/algo
# entries; narrowing to KeyError would avoid masking real bugs.
continue
ax.scatter(x, y, s = 70, c=performance, cmap=cmap, norm=normalize, zorder=10)
"""
for i in range(len(performance)):
ax.annotate(f"{int(performance[i])}%", (x[i] + offset_ann, y[i] + offset_ann), fontsize="x-small", zorder=20)
"""
if a == 0:
print("-" * 30)
print(types[t])
print("(TQ - SAC):", " ".join([f"{round(i, 3)}" for i in scipy.stats.pearsonr(x, y)]))
print("-" * 30)
print(algo, " (TQ - P):", " ".join([f"{round(i, 3)}" for i in scipy.stats.pearsonr(y, performance)]))
print(algo, " (SAC - P):", " ".join([f"{round(i, 3)}" for i in scipy.stats.pearsonr(x, performance)]))
print("-" * 30)
# NOTE(review): both branches of the shrink conditional are 0.5.
f.colorbar(matplotlib.cm.ScalarMappable(norm=normalize, cmap=cmap), ax=axs, anchor=(1.35, 0.55),
shrink=0.5 if t < 2 else 0.5).set_label(label="Performance in % of Online Policy", size=14)
f.tight_layout(rect=(0.022, 0.022, 0.92, 1))
f.text(0.5, 0.01, x_label, ha='center', fontsize="large")
f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
plt.savefig(os.path.join(outdir, f"algos_{types[t]}." + image_type))
plt.close()
### algos averaged
# Same scatter as above, but points are averaged over the useruns of each
# environment before plotting.
for t, environments in enumerate([list(envs), list(envs)[:4], list(envs)[4:]]):
if t == 2:
f, axs = plt.subplots(2, 2, figsize=(figsize_thesmallplot[0], figsize_thesmallplot[1]), sharex=True,
sharey=True)
axs = [item for sublist in zip(axs[:, 0], axs[:, 1]) for item in sublist]
algos_ = ["BC", "DQN", "BCQ", "CQL"]
else:
f, axs = plt.subplots(3, 3, figsize=(figsize_theplot[0], figsize_theplot[1]), sharex=True, sharey=True)
axs = [item for sublist in zip(axs[:, 0], axs[:, 1], axs[:, 2]) for item in sublist]
algos_ = algos
for a, algo in enumerate(algos_):
ax = axs[a]
ax.axhline(y=1, color="silver")
ax.axvline(x=1, color="silver")
ax.set_title(algo, fontsize="large")
x_, y_, performance_ = [], [], []
for e, env in enumerate(list(environments)):
x, y, performance = [], [], []
for userun in range(1, useruns + 1):
online_return = np.max(data[env][userun]["online"]["DQN"])
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
for m, mode in enumerate(modes):
try:
performance.append((np.max(np.mean(data[env][userun][mode][algo], axis=1)) - random_return) / (
online_return - random_return) * 100)
x.append(mm.get_data(env, mode, userun)[2] / online_usap)
y.append((mm.get_data(env, mode, userun)[0][0] - random_return) / (online_return - random_return))
except:
continue
# NOTE(review): the reshape assumes every userun contributed the same
# number of points; the bare except above can violate that - verify.
x_.extend(np.mean(np.asarray(x).reshape(useruns, -1), axis=0).tolist())
y_.extend(np.mean(np.asarray(y).reshape(useruns, -1), axis=0).tolist())
performance_.extend(np.mean(np.asarray(performance).reshape(useruns, -1), axis=0).tolist())
ax.scatter(x_, y_, s = 140, c=performance_, cmap=cmap, norm=normalize, zorder=10)
"""
for i in range(len(performance_)):
ax.annotate(f"{int(performance_[i])}%", (x_[i] + offset_ann, y_[i] + offset_ann), fontsize="x-small", zorder=20)
"""
if a == 0:
print("-" * 30)
print(types[t])
print("(TQ - SAC):", " ".join([f"{round(i, 3)}" for i in scipy.stats.pearsonr(x_, y_)]))
print("-" * 30)
print(algo, " (TQ - P):", " ".join([f"{round(i, 3)}" for i in scipy.stats.pearsonr(y_, performance_)]))
print(algo, " (SAC - P):", " ".join([f"{round(i, 3)}" for i in scipy.stats.pearsonr(x_, performance_)]))
print("-" * 30)
f.colorbar(matplotlib.cm.ScalarMappable(norm=normalize, cmap=cmap), ax=axs, anchor=(1.35, 0.55),
shrink=0.5 if t < 2 else 0.5).set_label(label="Performance in % of Online Policy", size=14)
f.tight_layout(rect=(0.022, 0.022, 0.92, 1))
f.text(0.5, 0.01, x_label, ha='center', fontsize="large")
f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
plt.savefig(os.path.join(outdir, f"algos_avg_{types[t]}." + image_type))
plt.close()
### for environments
# One scatter panel per environment; the point color aggregates performance
# across algorithms with the selected statistic (mean, max, ...).
for method in ["Mean", "Maximum", "Minimum", "Median", "Mean + STD", "Mean - STD"]:
f, axs = plt.subplots(2, 3, figsize=figsize_envs, sharex=True, sharey=True)
axs = [item for sublist in zip(axs[0], axs[1]) for item in sublist]
for e, env in enumerate(envs):
ax = axs[e]
ax.axhline(y=1, color="silver")
ax.axvline(x=1, color="silver")
for userun in range(1, useruns + 1):
online_return = np.max(data[env][userun]["online"]["DQN"])
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
x, y, performance = [], [], []
for m, mode in enumerate(modes):
x.append(mm.get_data(env, mode, userun)[2] / online_usap)
y.append((mm.get_data(env, mode, userun)[0][0] - random_return) / (online_return - random_return))
p = []
for algo in algos:
# NOTE(review): set_title inside the algo loop re-sets the same title
# repeatedly; it belongs one loop further out.
ax.set_title("-".join(env.split("-")[:-1]), fontsize="large")
try:
p.append((np.max(np.mean(data[env][userun][mode][algo], axis=1)) - random_return) /
(online_return - random_return) * 100)
except:
pass
performance.append(p)
if method == "Mean":
performance = np.mean(np.asarray(performance), axis=1)
elif method == "Maximum":
performance = np.max(np.asarray(performance), axis=1)
elif method == "Minimum":
performance = np.min(np.asarray(performance), axis=1)
elif method == "Mean + STD":
performance = np.mean(np.asarray(performance), axis=1) + np.std(np.asarray(performance), axis=1)
elif method == "Mean - STD":
performance = np.mean(np.asarray(performance), axis=1) - np.std(np.asarray(performance), axis=1)
elif method == "Median":
performance = np.median(np.asarray(performance), axis=1)
ax.scatter(x, y, s=100, c=performance, cmap=cmap, norm=normalize, zorder=10)
"""
for i in range(len(performance)):
ax.annotate(f"{int(performance[i])}%", (x[i] + offset_ann, y[i] + offset_ann), fontsize="x-small", va="bottom", ha="left",zorder=20)
ax.annotate(annotations[i], (x[i] - offset_ann, y[i] + offset_ann), fontsize="x-small", va="bottom", ha="right", zorder=30)
"""
# NOTE(review): t here is a stale leftover from the earlier algos loop, and
# both branches of the shrink conditional are 0.5 anyway.
f.colorbar(matplotlib.cm.ScalarMappable(norm=normalize, cmap=cmap), ax=axs, anchor=(1.35, 0.55),
shrink=0.5 if t < 2 else 0.5).set_label(label="Performance in % of Online Policy", size=14)
f.tight_layout(rect=(0.022, 0.022, 0.92, 0.96))
f.text(0.5, 0.96, f"{method} performance across algorithms", ha='center', fontsize="x-large")
f.text(0.5, 0.01, x_label, ha='center', fontsize="large")
f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
plt.savefig(os.path.join(outdir, f"envs_{method}." + image_type))
plt.close()
#### for Environments average
# NOTE(review): this section's body is identical to the previous one; only the
# output filename differs ("envs_avg_" vs "envs_") - no averaging actually
# happens here. Candidate for deduplication or an actual per-userun average.
for method in ["Mean", "Maximum", "Minimum", "Median", "Mean + STD", "Mean - STD"]:
f, axs = plt.subplots(2, 3, figsize=figsize_envs, sharex=True, sharey=True)
axs = [item for sublist in zip(axs[0], axs[1]) for item in sublist]
for e, env in enumerate(envs):
ax = axs[e]
ax.axhline(y=1, color="silver")
ax.axvline(x=1, color="silver")
for userun in range(1, useruns + 1):
online_return = np.max(data[env][userun]["online"]["DQN"])
random_return = mm.get_data(env, "random", userun)[0][0]
online_usap = mm.get_data(env, "er", userun)[2]
x, y, performance = [], [], []
for m, mode in enumerate(modes):
x.append(mm.get_data(env, mode, userun)[2] / online_usap)
y.append((mm.get_data(env, mode, userun)[0][0] - random_return) / (online_return - random_return))
p = []
for algo in algos:
ax.set_title("-".join(env.split("-")[:-1]), fontsize="large")
try:
p.append((np.max(np.mean(data[env][userun][mode][algo], axis=1)) - random_return) /
(online_return - random_return) * 100)
except:
pass
performance.append(p)
if method == "Mean":
performance = np.mean(np.asarray(performance), axis=1)
elif method == "Maximum":
performance = np.max(np.asarray(performance), axis=1)
elif method == "Minimum":
performance = np.min(np.asarray(performance), axis=1)
elif method == "Mean + STD":
performance = np.mean(np.asarray(performance), axis=1) + np.std(np.asarray(performance), axis=1)
elif method == "Mean - STD":
performance = np.mean(np.asarray(performance), axis=1) - np.std(np.asarray(performance), axis=1)
elif method == "Median":
performance = np.median(np.asarray(performance), axis=1)
ax.scatter(x, y, s=100, c=performance, cmap=cmap, norm=normalize, zorder=10)
"""
for i in range(len(performance)):
ax.annotate(f"{int(performance[i])}%", (x[i] + offset_ann, y[i] + offset_ann), fontsize="x-small", va="bottom", ha="left",zorder=20)
ax.annotate(annotations[i], (x[i] - offset_ann, y[i] + offset_ann), fontsize="x-small", va="bottom", ha="right", zorder=30)
"""
f.colorbar(matplotlib.cm.ScalarMappable(norm=normalize, cmap=cmap), ax=axs, anchor=(1.35, 0.55),
shrink=0.5 if t < 2 else 0.5).set_label(label="Performance in % of Online Policy", size=14)
f.tight_layout(rect=(0.022, 0.022, 0.92, 0.96))
f.text(0.5, 0.96, f"{method} performance across algorithms", ha='center', fontsize="x-large")
f.text(0.5, 0.01, x_label, ha='center', fontsize="large")
f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
plt.savefig(os.path.join(outdir, f"envs_avg_{method}." + image_type))
plt.close()
#############################
# Comparisons #
#############################
##################
# load reward data
##################
outdir = os.path.join("..", "..", "results", folder, "comp_return")
os.makedirs(outdir, exist_ok=True)
###############
# plot metrics + policy for reward
###############
# titles
y_label = "Maximum Average Return"
x_label = "Dataset"
### buffertypes per userun
# Per userun: one panel per environment showing behavioral-policy return,
# online DQN ceiling, and each offline algorithm's mean return +/- std per mode.
for userun in range(1, useruns + 1):
# plot for modes
f, axs = plt.subplots(2, 3, figsize=figsize_comp, sharex=True)
axs = [item for sublist in zip(axs[0], axs[1]) for item in sublist]
for e, env in enumerate(envs):
ax = axs[e]
ax.set_title(env[:-3])
x, y = list(range(len(buffer))), []
for mode in modes:
y.append(mm.get_data(env, mode, userun)[0][0])
# NOTE(review): the comprehension variable shadows the builtin `tuple`.
x, y = [list(tuple) for tuple in zip(*sorted(zip(x, y)))]
ax.plot(x, y, "o:", label=("Behav." if e == 0 else None), color="black")
# Online Policy
csv = data[env][userun]["online"]["DQN"]
ax.axhline(y=np.max(csv), color="black", label=("Online" if e == 0 else None))
for a, algo in enumerate(algos):
x, y, sd = [], [], []
for m, mode in enumerate(modes):
try:
y.append(np.mean(data[env][userun][mode][algo]))
sd.append(np.std(data[env][userun][mode][algo]))
x.append(m)
except:
# print(env, userun, mode, algo)
pass
if len(x) == 0 or len(y) == 0 or len(sd) == 0:
continue
x, y, sd = [list(tuple) for tuple in zip(*sorted(zip(x, y, sd)))]
cis = (np.asarray(y) - np.asarray(sd), np.asarray(y) + np.asarray(sd))
ax.fill_between(x, cis[0], cis[1], alpha=0.2, color=f"C{a}")
ax.plot(x, y, "o-", label=(algo if e == 0 else None), color=f"C{a}")
# NOTE(review): this rebuild of x appears unused afterwards - dead code.
x = []
for m, mode in enumerate(modes):
x.append(m)
ax.set_xticks(range(len(modes)))
ax.set_xticklabels([buffer[m] for m in modes], fontsize="small")#, rotation=15, rotation_mode="anchor")
f.legend(loc="upper center", ncol=len(algos) + 2, fontsize="small")
f.tight_layout(rect=(0.008, 0.022, 1, 0.95))
f.text(0.52, 0.01, x_label, ha='center', fontsize="large")
f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
plt.savefig(os.path.join(outdir, f"buffertypes_userun{userun}." + image_type))
plt.close()
### MountainCar
# 5x5 grid of state scatter plots (position vs velocity, colored by action)
# for each buffer type x userun; assumes 5 buffer types and 5 useruns
# (axs index m*5 + userun - 1) - TODO confirm.
outdir = os.path.join("..", "..", "results", folder, "mountainCar")
os.makedirs(outdir, exist_ok=True)
colors=["#39568CFF", "#ffcf20FF", "#29AF7FFF"]
samples = 10000
np.random.seed(42)
ind = np.random.choice(10**5, (samples, ), replace=False)
f, axs = plt.subplots(5, 5, figsize=figsize_theplot, sharex=True, sharey=True)
axs = [item for sublist in zip(axs[0], axs[1], axs[2], axs[3], axs[4]) for item in sublist]
for m, bt in enumerate(buffer):
for userun in range(1, useruns + 1):
ax = axs[m*5 + userun - 1]
# load saved buffer
# NOTE(review): this rebinds `data`, clobbering the experiment-results dict
# used by all earlier sections; nothing below reads the old dict, but any
# code appended after this section would see the pickled buffer instead.
with open(os.path.join("..", "..", "data", f"ex2", f"MountainCar-v0_run{userun}_{bt}.pkl"), "rb") as file:
data = pickle.load(file)
if userun == 1:
ax.set_title(buffer[bt])
ax.scatter(data.state[ind, 0], data.state[ind, 1], c=[colors[a] for a in data.action[ind, 0]], s=0.5)
ax.text(0.02, 0.92, f"Run {userun}", fontsize="small", transform=ax.transAxes)
f.tight_layout(rect=(0.022, 0.022, 1, 0.97))
labels = [mpatches.Patch(color=colors[a], fill=True, linewidth=1, label=mc_actions[a]) for a in range(3)]
f.legend(handles=labels, handlelength=1, loc="upper right", ncol=3, fontsize="small")
f.text(0.53, 0.98, "Dataset", ha='center', fontsize="large")
f.text(0.53, 0.01, "Position in m", ha='center', fontsize="large")
f.text(0.005, 0.5, "Velocity in m/s", va='center', rotation='vertical', fontsize="large")
plt.savefig(os.path.join(outdir, f"mountaincar.png"))
plt.close()
| 40.394062
| 144
| 0.5587
| 4,221
| 29,932
| 3.885572
| 0.094764
| 0.023901
| 0.018109
| 0.024145
| 0.788976
| 0.769831
| 0.720078
| 0.706298
| 0.694287
| 0.694287
| 0
| 0.037516
| 0.256415
| 29,932
| 740
| 145
| 40.448649
| 0.699375
| 0.037452
| 0
| 0.672414
| 0
| 0
| 0.099847
| 0.007605
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001916
| false
| 0.005747
| 0.022989
| 0
| 0.024904
| 0.028736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
98b98fad99a549679ab71ed1e6fe0f75606accc6
| 249
|
py
|
Python
|
fastapi_admin/apps/wxPay/tools.py
|
Chise1/fastapi-admin
|
74693cf8dd854d61ae5bd931ebe85f5b94f48121
|
[
"Apache-2.0"
] | null | null | null |
fastapi_admin/apps/wxPay/tools.py
|
Chise1/fastapi-admin
|
74693cf8dd854d61ae5bd931ebe85f5b94f48121
|
[
"Apache-2.0"
] | null | null | null |
fastapi_admin/apps/wxPay/tools.py
|
Chise1/fastapi-admin
|
74693cf8dd854d61ae5bd931ebe85f5b94f48121
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
@File : tools.py
@Time : 2020/4/11 10:43
@Author : chise
@Email : chise123@live.com
@Software: PyCharm
@info :一些处理数据的工具函数
"""
from typing import Dict
def dict2xml(d: Dict[str, str]) -> str:
    """Serialize a flat string dict into an ``<xml>``-wrapped document.

    The original function was an unimplemented stub (docstring said
    "dict to xml" but the body was ``pass``). Each key/value pair becomes
    ``<key>value</key>`` in insertion order, wrapped in a single ``<xml>``
    root element.

    Args:
        d: Mapping of tag names to text values.

    Returns:
        The serialized XML document as a string.
    """
    parts = ["<xml>"]
    for tag, text in d.items():
        parts.append(f"<{tag}>{text}</{tag}>")
    parts.append("</xml>")
    return "".join(parts)
| 17.785714
| 30
| 0.606426
| 34
| 249
| 4.441176
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081218
| 0.208835
| 249
| 14
| 31
| 17.785714
| 0.685279
| 0.662651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
7f28f13048650c2961d83d7539ae3569c83af87b
| 1,295
|
py
|
Python
|
test.py
|
SriMethan/python-tictactoe
|
40dcd64c7a42c1e4a94eb40d54e985f7165f46c2
|
[
"MIT"
] | null | null | null |
test.py
|
SriMethan/python-tictactoe
|
40dcd64c7a42c1e4a94eb40d54e985f7165f46c2
|
[
"MIT"
] | null | null | null |
test.py
|
SriMethan/python-tictactoe
|
40dcd64c7a42c1e4a94eb40d54e985f7165f46c2
|
[
"MIT"
] | null | null | null |
from tictactoe import Board
def draw_board():
    """Return a fully played board whose game ends in a draw."""
    moves = [
        (0, 0), (0, 1), (0, 2),
        (1, 1), (1, 0), (2, 0),
        (1, 2), (2, 2), (2, 1),
    ]
    board = Board()
    for move in moves:
        board.push(move)
    return board
def x_win_board():
    """Return a board where X has completed a winning line."""
    moves = [
        (0, 0), (0, 1), (0, 2),
        (1, 1), (1, 0), (1, 2),
        (2, 0),
    ]
    board = Board()
    for move in moves:
        board.push(move)
    return board
def o_win_board():
    """Return a board where O has completed a winning line."""
    moves = [
        (0, 0), (0, 1), (0, 2),
        (1, 1), (1, 0), (2, 0),
        (1, 2), (2, 1),
    ]
    board = Board()
    for move in moves:
        board.push(move)
    return board
def unfinished_board():
    """Return a board one move short of the draw game (no result yet)."""
    moves = [
        (0, 0), (0, 1), (0, 2),
        (1, 1), (1, 0), (2, 0),
        (1, 2), (2, 2),
    ]
    board = Board()
    for move in moves:
        board.push(move)
    return board
def test_result():
    """Check Board.result() for X win, O win, draw, and unfinished games."""
    expected = {x_win_board: 1, o_win_board: 2, draw_board: 0}
    for factory, outcome in expected.items():
        assert factory().result() == outcome
    assert unfinished_board().result() is None
# Allow running the assertions directly without a test runner.
if __name__ == "__main__":
test_result()
| 19.923077
| 47
| 0.51583
| 193
| 1,295
| 3.34715
| 0.119171
| 0.44582
| 0.185759
| 0.085139
| 0.688854
| 0.688854
| 0.688854
| 0.688854
| 0.608359
| 0.608359
| 0
| 0.072826
| 0.289575
| 1,295
| 64
| 48
| 20.234375
| 0.629348
| 0
| 0
| 0.769231
| 0
| 0
| 0.006499
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.096154
| false
| 0
| 0.019231
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7f29a37ba7ca9d8b45eadd4c85b6f59cdd98a5b1
| 671
|
py
|
Python
|
theorems/admin.py
|
austindjones/mathreview
|
34cd22103d5880bb283e695d2114bb6ddf4c2219
|
[
"MIT"
] | 1
|
2021-02-25T20:57:50.000Z
|
2021-02-25T20:57:50.000Z
|
theorems/admin.py
|
austindjones/mathreview
|
34cd22103d5880bb283e695d2114bb6ddf4c2219
|
[
"MIT"
] | null | null | null |
theorems/admin.py
|
austindjones/mathreview
|
34cd22103d5880bb283e695d2114bb6ddf4c2219
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Subject
from .models import Theorem
from .models import Theorem_Statement
from .models import Theorem_Proof
from .models import Definition
from .models import Proof_Definition_Link
from .models import Question
from .models import Keyword
from .models import Theorem_Keyword_Link
# Register every help-center model with the Django admin site,
# in the same order as the imports above.
for _model in (
    Subject,
    Theorem,
    Theorem_Statement,
    Theorem_Proof,
    Definition,
    Proof_Definition_Link,
    Question,
    Keyword,
    Theorem_Keyword_Link,
):
    admin.site.register(_model)
| 29.173913
| 42
| 0.845007
| 93
| 671
| 5.967742
| 0.193548
| 0.162162
| 0.259459
| 0.165766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083458
| 671
| 22
| 43
| 30.5
| 0.902439
| 0.038748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.526316
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7f3ae844e2e9d48df0c926798ecbbafdf0478afc
| 803
|
py
|
Python
|
ivreg/services.py
|
sirex/internet-voting-registration
|
b60915507966ec150db36ef63782971d0d5e1a2b
|
[
"MIT"
] | null | null | null |
ivreg/services.py
|
sirex/internet-voting-registration
|
b60915507966ec150db36ef63782971d0d5e1a2b
|
[
"MIT"
] | null | null | null |
ivreg/services.py
|
sirex/internet-voting-registration
|
b60915507966ec150db36ef63782971d0d5e1a2b
|
[
"MIT"
] | null | null | null |
import os
import uuid
import json
import hashlib
import base64
import requests
def generate_request_id():
    """Return a random base32 request identifier with padding stripped."""
    raw = uuid.uuid4().bytes
    encoded = base64.b32encode(raw).decode('ascii')
    return encoded.rstrip('=')
def generate_candidate_codes(candidates):
    """Map each candidate to a fresh random 5-character base32 code."""
    codes = {}
    for candidate in candidates:
        token = base64.b32encode(os.urandom(5)).decode('ascii')
        codes[candidate] = token[:5]
    return codes
def generate_ballot_id():
    """Return the first 10 base32 characters of a random UUID."""
    encoded = base64.b32encode(uuid.uuid4().bytes).decode('ascii')
    return encoded[:10]
def verify_vote(data):
    """Check a cast ballot against the public vote log.

    Recomputes the SHA-256 vote hash from ballot_id + candidate_id + vcode,
    then scans the remote log (one JSON object per line) for a matching
    ballot_id / vote_hash pair.

    Returns True when a matching log entry exists, False otherwise.
    """
    fingerprint = (data['ballot_id'] + data['candidate_id'] + data['vcode']).encode('utf-8')
    vote_hash = hashlib.sha256(fingerprint).hexdigest()
    log_lines = requests.get('http://log.rk.sub.lt/').text.splitlines()
    for raw in log_lines:
        entry = json.loads(raw.strip())
        if entry['ballot_id'] == data['ballot_id'] and entry['vote_hash'] == vote_hash:
            return True
    return False
| 28.678571
| 118
| 0.689913
| 111
| 803
| 4.864865
| 0.477477
| 0.059259
| 0.051852
| 0.085185
| 0.177778
| 0.177778
| 0.177778
| 0.177778
| 0.177778
| 0
| 0
| 0.034985
| 0.145704
| 803
| 27
| 119
| 29.740741
| 0.752187
| 0
| 0
| 0
| 1
| 0
| 0.118306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.315789
| 0.157895
| 0.789474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
7f6db1e9953197c6e430f0417d4e9234193cc958
| 136
|
py
|
Python
|
slackpicam.py
|
ricklon/slackpicam
|
dc4b3f74cc4f22a9f9d642878e5649bc1ee86ef9
|
[
"Apache-2.0"
] | 1
|
2019-07-12T15:36:06.000Z
|
2019-07-12T15:36:06.000Z
|
slackpicam.py
|
ricklon/slackpicam
|
dc4b3f74cc4f22a9f9d642878e5649bc1ee86ef9
|
[
"Apache-2.0"
] | null | null | null |
slackpicam.py
|
ricklon/slackpicam
|
dc4b3f74cc4f22a9f9d642878e5649bc1ee86ef9
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
from picamera import PiCamera
# Grab a single 1024x768 still from the Raspberry Pi camera and save it
# to the working directory. Requires camera hardware; runs at import time.
camera = PiCamera()
camera.resolution = (1024, 768)
camera.capture('foobar.jpg')
| 17
| 31
| 0.764706
| 18
| 136
| 5.777778
| 0.666667
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059322
| 0.132353
| 136
| 7
| 32
| 19.428571
| 0.822034
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7f735174c1fbd8a50460c5cb2d32e73a2ceb1276
| 74
|
py
|
Python
|
smrf/envphys/__init__.py
|
scotthavens/smrf
|
a492d01a5eef994e00728c1cbed9f693879bbade
|
[
"CC0-1.0"
] | null | null | null |
smrf/envphys/__init__.py
|
scotthavens/smrf
|
a492d01a5eef994e00728c1cbed9f693879bbade
|
[
"CC0-1.0"
] | null | null | null |
smrf/envphys/__init__.py
|
scotthavens/smrf
|
a492d01a5eef994e00728c1cbed9f693879bbade
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import core, phys, radiation, snow, storms
| 24.666667
| 49
| 0.635135
| 10
| 74
| 4.7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.175676
| 74
| 2
| 50
| 37
| 0.754098
| 0.283784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f6887a1bc28ae32c0dd52cdd49a32ee38e668ed8
| 218
|
py
|
Python
|
helpcenter/tests/dummy_classes.py
|
smalls12/django_helpcenter
|
e0118447871abad701056bc137d63e6fcd8abde6
|
[
"MIT"
] | 4
|
2017-07-30T17:43:36.000Z
|
2021-09-14T04:26:37.000Z
|
helpcenter/tests/dummy_classes.py
|
cdriehuys/django_helpcenter
|
e0118447871abad701056bc137d63e6fcd8abde6
|
[
"MIT"
] | 20
|
2016-06-30T04:52:26.000Z
|
2016-09-30T04:52:15.000Z
|
helpcenter/tests/dummy_classes.py
|
cdriehuys/django_helpcenter
|
e0118447871abad701056bc137d63e6fcd8abde6
|
[
"MIT"
] | null | null | null |
from django import forms
class BlankForm(forms.Form):
    """A field-less form used as a test fixture."""

    def __init__(self, *args, **kwargs):
        # Deliberately discard all arguments: the form always starts empty.
        super().__init__()
| 21.8
| 44
| 0.637615
| 25
| 218
| 5.24
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215596
| 218
| 9
| 45
| 24.222222
| 0.766082
| 0.243119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
f68a9fd9a2a3b0664886738789562dcab483d747
| 166
|
py
|
Python
|
materi/pertemuan_5/python/login.py
|
ai-qadrlabs/dasar-pemrograman
|
73dbc87ef33159542be89f12f89b2873a06a4e3a
|
[
"MIT"
] | null | null | null |
materi/pertemuan_5/python/login.py
|
ai-qadrlabs/dasar-pemrograman
|
73dbc87ef33159542be89f12f89b2873a06a4e3a
|
[
"MIT"
] | null | null | null |
materi/pertemuan_5/python/login.py
|
ai-qadrlabs/dasar-pemrograman
|
73dbc87ef33159542be89f12f89b2873a06a4e3a
|
[
"MIT"
] | null | null | null |
# Demo credential check: reports whether the hard-coded login succeeds.
# (With this password value the check fails and the "gagal" branch prints.)
username = 'admin'
password = 'garahasia'
credentials_ok = (username == 'admin') and (password == 'rahasia')
print('user berhasil login' if credentials_ok else 'user gagal login')
| 20.75
| 53
| 0.650602
| 19
| 166
| 5.684211
| 0.684211
| 0.240741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 166
| 7
| 54
| 23.714286
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0.36747
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
f6914ec37499c1b6f5fdf1173c659e29406e4aee
| 60
|
py
|
Python
|
pc_processor/models/__init__.py
|
MasterHow/PanoLiSeg
|
56bd09fe3c85251c46532dba5fcec5fb03951c36
|
[
"MIT"
] | 65
|
2021-08-03T02:37:14.000Z
|
2022-03-28T17:11:23.000Z
|
pc_processor/models/__init__.py
|
MasterHow/PanoLiSeg
|
56bd09fe3c85251c46532dba5fcec5fb03951c36
|
[
"MIT"
] | 12
|
2021-10-30T03:11:00.000Z
|
2022-03-27T11:36:11.000Z
|
pc_processor/models/__init__.py
|
MasterHow/PanoLiSeg
|
56bd09fe3c85251c46532dba5fcec5fb03951c36
|
[
"MIT"
] | 23
|
2021-10-14T02:44:34.000Z
|
2022-03-18T11:45:23.000Z
|
from .salsanext import SalsaNext
from .pmf_net import PMFNet
| 30
| 32
| 0.85
| 9
| 60
| 5.555556
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 60
| 2
| 33
| 30
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f6d69e0e4ee62c1f2a3f87de58751f58ea4ec399
| 87
|
py
|
Python
|
knowledge_sim/ontology/__init__.py
|
t3pleni9/KnowledgeSimulator
|
2c3df1e49372176daa20bbda2bf53910358373a0
|
[
"Apache-2.0"
] | 1
|
2021-06-18T02:22:54.000Z
|
2021-06-18T02:22:54.000Z
|
knowledge_sim/ontology/__init__.py
|
t3pleni9/KnowledgeSimulator
|
2c3df1e49372176daa20bbda2bf53910358373a0
|
[
"Apache-2.0"
] | null | null | null |
knowledge_sim/ontology/__init__.py
|
t3pleni9/KnowledgeSimulator
|
2c3df1e49372176daa20bbda2bf53910358373a0
|
[
"Apache-2.0"
] | null | null | null |
from .reasoner import Reasoner
from .behavior import Behavior
from .state import State
| 21.75
| 30
| 0.827586
| 12
| 87
| 6
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 87
| 3
| 31
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f6f0419aeaa7066172b9955e6da6dc1b5331e9ec
| 2,246
|
py
|
Python
|
tests/structure/test_singles.py
|
nakaken88/NKSSG
|
8f635bef3c466afd0842178e9a9a3501c3f39119
|
[
"MIT"
] | 2
|
2021-04-28T11:52:08.000Z
|
2021-11-16T11:32:47.000Z
|
tests/structure/test_singles.py
|
nakaken88/NKSSG
|
8f635bef3c466afd0842178e9a9a3501c3f39119
|
[
"MIT"
] | null | null | null |
tests/structure/test_singles.py
|
nakaken88/NKSSG
|
8f635bef3c466afd0842178e9a9a3501c3f39119
|
[
"MIT"
] | null | null | null |
import datetime
from pathlib import Path
from nkssg.structure.singles import Singles, Single
def test_get_url_from_permalink_no_change():
    """A permalink containing no placeholders is returned verbatim."""
    page = Single('', '')
    page.date = datetime.datetime.now()
    assert page.get_url_from_permalink('/sample/', None) == '/sample/'
def test_get_url_from_permalink_YMD():
    """%Y/%m/%d strftime codes in the permalink expand from the date."""
    page = Single('', '')
    reference = datetime.datetime.now()
    page.date = datetime.datetime.now()
    assert page.get_url_from_permalink('/%Y/%m/%d/', None) == reference.strftime('/%Y/%m/%d/')
def test_get_url_from_permalink_YMD_HMS():
    """Time-of-day strftime codes expand alongside the date codes."""
    page = Single('', '')
    reference = datetime.datetime.now()
    page.date = datetime.datetime.now()
    expected = reference.strftime('/%Y/%m/%d/%H%M%S/')
    assert page.get_url_from_permalink('/%Y/%m/%d/%H%M%S/', None) == expected
def test_get_url_from_permalink_slug():
    """The {slug} placeholder expands to the single's slug attribute."""
    page = Single('', '')
    page.date = datetime.datetime.now()
    page.slug = 'sample'
    assert page.get_url_from_permalink('/{slug}/', None) == '/sample/'
def test_get_url_from_permalink_filename():
    """The {filename} placeholder expands to the single's filename."""
    single = Single('', '')
    single.date = datetime.datetime.now()
    single.filename = 'sample'
    # FIX: the permalink template was corrupted to '/(unknown)/' (a metadata
    # placeholder); restored the '{filename}' placeholder this test exercises,
    # matching the test name and the expected '/sample/' result.
    ret = single.get_url_from_permalink('/{filename}/', None)
    assert ret == '/sample/'
def test_get_url_from_permalink_filename_dirty_name():
    """A filename with spaces is slugified (lower-cased, hyphenated)."""
    single = Single('', '')
    single.date = datetime.datetime.now()
    single.filename = 'A of C'
    # FIX: restored the corrupted '/(unknown)/' template to '/{filename}/'
    # so the slugification of the filename is actually exercised.
    ret = single.get_url_from_permalink('/{filename}/', None)
    assert ret == '/a-of-c/'
def test_get_url_from_permalink_filename_index():
    """An 'index' filename falls back to its parent directory's name."""
    single = Single('', '')
    single.date = datetime.datetime.now()
    single.filename = 'index'
    single.src_dir = Path('post_type', 'dir1', 'dir2')
    # FIX: restored the corrupted '/(unknown)/' template to '/{filename}/';
    # the expected '/dir2/' result comes from the src_dir fallback.
    ret = single.get_url_from_permalink('/{filename}/', None)
    assert ret == '/dir2/'
def test_get_url_from_permalink_filename_top_index():
    """A top-level 'index' file resolves to the post type's configured slug."""
    single = Single('', '')
    single.date = datetime.datetime.now()
    single.filename = 'index'
    single.post_type = 'sample_post_type'
    single.src_dir = Path(single.post_type)
    config = {'post_type': [{'sample_post_type': {'slug': 'new_post_type'}}]}
    # FIX: restored the corrupted '/(unknown)/' template to '/{filename}/';
    # the config maps the post type to the 'new_post_type' slug asserted below.
    ret = single.get_url_from_permalink('/{filename}/', config)
    assert ret == '/new_post_type/'
| 30.767123
| 77
| 0.664737
| 298
| 2,246
| 4.724832
| 0.157718
| 0.068182
| 0.113636
| 0.215909
| 0.838778
| 0.801847
| 0.760653
| 0.627131
| 0.555398
| 0.477273
| 0
| 0.001605
| 0.167854
| 2,246
| 72
| 78
| 31.194444
| 0.751739
| 0
| 0
| 0.481481
| 0
| 0
| 0.122049
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.148148
| false
| 0
| 0.055556
| 0
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
100a4046af88fc4895dc10d3d1832d4b693b3e9c
| 253
|
py
|
Python
|
tests/mockups/firstDiscoveryRequest.py
|
securesonic/safekiddo-mdm
|
05ebd6ba0d01c7b1aa85b473c764b870c0b8e182
|
[
"BSD-2-Clause"
] | null | null | null |
tests/mockups/firstDiscoveryRequest.py
|
securesonic/safekiddo-mdm
|
05ebd6ba0d01c7b1aa85b473c764b870c0b8e182
|
[
"BSD-2-Clause"
] | null | null | null |
tests/mockups/firstDiscoveryRequest.py
|
securesonic/safekiddo-mdm
|
05ebd6ba0d01c7b1aa85b473c764b870c0b8e182
|
[
"BSD-2-Clause"
] | null | null | null |
import urllib2
import mockupsLib
# Python 2 script: sends the initial MDM enrollment discovery request to the
# mockup server and prints the response headers.
opener = urllib2.build_opener()
# NOTE(review): the Content-Type value 'unknown' looks like a placeholder --
# confirm the header a real enrollment client sends for Discovery.svc.
request = urllib2.Request(mockupsLib.getDiscoveryRequestUrl()+"/EnrollmentServer/Discovery.svc", headers={'Content-Type': 'unknown'})
response = opener.open(request)
# Python 2 print statement: dump the HTTP response headers for inspection.
print response.info()
| 36.142857
| 133
| 0.798419
| 27
| 253
| 7.444444
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012712
| 0.067194
| 253
| 7
| 134
| 36.142857
| 0.838983
| 0
| 0
| 0
| 0
| 0
| 0.19685
| 0.122047
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.166667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
100dec975c9ad9898f05a884e1f3edcd0a4bc7cd
| 28
|
py
|
Python
|
sentry/__init__.py
|
justquick/django-sentry
|
07988759144524ba49bc63b308663244d1a69d04
|
[
"BSD-3-Clause"
] | 1
|
2016-03-21T18:56:31.000Z
|
2016-03-21T18:56:31.000Z
|
sentry/__init__.py
|
justquick/django-sentry
|
07988759144524ba49bc63b308663244d1a69d04
|
[
"BSD-3-Clause"
] | null | null | null |
sentry/__init__.py
|
justquick/django-sentry
|
07988759144524ba49bc63b308663244d1a69d04
|
[
"BSD-3-Clause"
] | null | null | null |
# Package version as a (major, minor, suffix) tuple; '-dev' marks an
# unreleased development build.
__version__ = (1, 0, '-dev')
| 28
| 28
| 0.571429
| 4
| 28
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.142857
| 28
| 1
| 28
| 28
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
101274f29925300afaefe843031ffe5b06b383a8
| 221
|
py
|
Python
|
yawhois/parser/durban_whois_registry_net_za.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
yawhois/parser/durban_whois_registry_net_za.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
yawhois/parser/durban_whois_registry_net_za.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
from .za_central_registry import ZaCentralRegistryParser
class DurbanWhoisRegistryNetZaParser(ZaCentralRegistryParser):
    """WHOIS response parser for durban-whois.registry.net.za.

    Inherits all parsing behavior unchanged from ZaCentralRegistryParser;
    the subclass exists only to give the registry its own parser name.

    NOTE: the original ``__init__`` only forwarded ``*args`` to
    ``super().__init__`` with no other work, which is exactly what the
    inherited constructor already does, so it was removed as redundant.
    """
| 31.571429
| 67
| 0.81448
| 18
| 221
| 9.444444
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113122
| 221
| 6
| 68
| 36.833333
| 0.867347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
122742f1e3fd8eddf7df483de6428e0f81b55333
| 92
|
py
|
Python
|
test/single_layer.py
|
gregvw/pyQAOA
|
59b5abda36d90b45913878e7ffb588a1c146bc38
|
[
"BSD-3-Clause"
] | null | null | null |
test/single_layer.py
|
gregvw/pyQAOA
|
59b5abda36d90b45913878e7ffb588a1c146bc38
|
[
"BSD-3-Clause"
] | null | null | null |
test/single_layer.py
|
gregvw/pyQAOA
|
59b5abda36d90b45913878e7ffb588a1c146bc38
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import qaoa
import matplotlib.pyplot as plt
if __name__ == '__main__':
| 13.142857
| 31
| 0.76087
| 14
| 92
| 4.428571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 92
| 6
| 32
| 15.333333
| 0.815789
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.75
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
123ee803ea3bc55ebbc04b94b215d90ef6f7a611
| 70
|
py
|
Python
|
danceschool/guestlist/__init__.py
|
benjwrdill/django-danceschool
|
9ecb2754502e62d0f49aa23d08ca6de6cae3c99a
|
[
"BSD-3-Clause"
] | 1
|
2019-02-04T02:11:32.000Z
|
2019-02-04T02:11:32.000Z
|
danceschool/guestlist/__init__.py
|
benjwrdill/django-danceschool
|
9ecb2754502e62d0f49aa23d08ca6de6cae3c99a
|
[
"BSD-3-Clause"
] | 2
|
2019-03-26T22:37:49.000Z
|
2019-12-02T15:39:35.000Z
|
danceschool/guestlist/__init__.py
|
benjwrdill/django-danceschool
|
9ecb2754502e62d0f49aa23d08ca6de6cae3c99a
|
[
"BSD-3-Clause"
] | 1
|
2019-03-19T22:49:01.000Z
|
2019-03-19T22:49:01.000Z
|
# Points Django at this app's AppConfig subclass.
# NOTE(review): `default_app_config` is deprecated since Django 3.2 (apps with
# a single AppConfig are auto-discovered) -- presumably kept for the project's
# older Django version; confirm before removing.
default_app_config = 'danceschool.guestlist.apps.GuestListAppConfig'
| 35
| 69
| 0.857143
| 7
| 70
| 8.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 70
| 1
| 70
| 70
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0.652174
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1251cd114b18bdc67612a8b040ba9243a77f076f
| 67
|
py
|
Python
|
setup.py
|
shamanoor/devops-for-data
|
87e4773b45844d12ff96d1be58ade665b5d36d85
|
[
"Apache-2.0"
] | 2
|
2020-10-29T15:27:52.000Z
|
2021-04-10T14:08:20.000Z
|
setup.py
|
shamanoor/devops-for-data
|
87e4773b45844d12ff96d1be58ade665b5d36d85
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
shamanoor/devops-for-data
|
87e4773b45844d12ff96d1be58ade665b5d36d85
|
[
"Apache-2.0"
] | 16
|
2020-10-28T12:21:51.000Z
|
2022-02-04T12:25:25.000Z
|
from setuptools import setup
# All config is in setup.cfg
# setup() with no arguments: setuptools reads name, version, packages, etc.
# declaratively from setup.cfg, so this stub only triggers the build.
setup()
| 13.4
| 28
| 0.761194
| 11
| 67
| 4.636364
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 67
| 4
| 29
| 16.75
| 0.927273
| 0.38806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
89efddebbe37d517ff525c9672af20117f46300d
| 1,286
|
py
|
Python
|
astropy/tests/__init__.py
|
REMeyer/astropy
|
28c49fb618538a01812e586cd07bccdf0591a6c6
|
[
"BSD-3-Clause"
] | 3
|
2018-03-20T15:09:16.000Z
|
2021-05-27T11:17:33.000Z
|
astropy/tests/__init__.py
|
REMeyer/astropy
|
28c49fb618538a01812e586cd07bccdf0591a6c6
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/tests/__init__.py
|
REMeyer/astropy
|
28c49fb618538a01812e586cd07bccdf0591a6c6
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains utilities to run the astropy test suite, tools
for writing tests, and general tests that are not associated with a
particular package.
"""
# NOTE: This is retained only for backwards compatibility. Affiliated packages
# should no longer import `disable_internet` from `astropy.tests`. It is now
# available from `pytest_remotedata`. However, this is not the recommended
# mechanism for controlling access to remote data in tests. Instead, packages
# should make use of decorators provided by the pytest_remotedata plugin:
# - `@pytest.mark.remote_data` for tests that require remote data access
# - `@pytest.mark.internet_off` for tests that should only run when remote data
# access is disabled.
# Remote data access for the test suite is controlled by the `--remote-data`
# command line flag. This is either passed to `pytest` directly or to the
# `setup.py test` command.
#
# TODO: This import should eventually be removed once backwards compatibility
# is no longer supported.
# FIX: `pkgutil.find_loader` is deprecated since Python 3.12 and removed in
# 3.14; `importlib.util.find_spec` is the documented replacement and likewise
# returns None when a top-level module is not importable.
from importlib.util import find_spec
if find_spec('pytest_remotedata') is not None:
    from pytest_remotedata import disable_internet
else:
    from ..extern.plugins.pytest_remotedata import disable_internet
| 44.344828
| 79
| 0.781493
| 189
| 1,286
| 5.253968
| 0.513228
| 0.060423
| 0.063444
| 0.058409
| 0.074522
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000923
| 0.157076
| 1,286
| 28
| 80
| 45.928571
| 0.915129
| 0.807932
| 0
| 0
| 0
| 0
| 0.075556
| 0
| 0
| 0
| 0
| 0.035714
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d63571f14d5098676b716fc361353085c3dfd8ca
| 368
|
py
|
Python
|
setup.py
|
COHRINT/etddf_minau
|
b2770aaaeff37bf580cc23a566f83432300b715c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
COHRINT/etddf_minau
|
b2770aaaeff37bf580cc23a566f83432300b715c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
COHRINT/etddf_minau
|
b2770aaaeff37bf580cc23a566f83432300b715c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup()
d['packages'] = ['etddf', 'cuprint', 'cuquantization', 'deltatier']
d['package_dir'] = {'etddf': 'src/etddf/etddf', "cuprint":'src/cuprint', 'cuquantization':"src/cuquantization", "deltatier":"src/deltatier"}
setup(**d)
| 40.888889
| 140
| 0.741848
| 46
| 368
| 5.782609
| 0.456522
| 0.067669
| 0.165414
| 0.172932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076087
| 368
| 9
| 141
| 40.888889
| 0.782353
| 0.054348
| 0
| 0
| 1
| 0
| 0.41954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d6430fc3118621cb938a86fd9fd8009e19b2be00
| 1,624
|
py
|
Python
|
examples/10_Example_DSSInterface.py
|
davilamds/py_dss_interface
|
a447c97787aeac962381db88dd622ccb235eef4b
|
[
"MIT"
] | 8
|
2020-08-15T12:56:03.000Z
|
2022-01-04T15:51:14.000Z
|
examples/10_Example_DSSInterface.py
|
rodolfoplondero/py_dss_interface
|
cb6771b34ed322a5df7ef1cc194611e794f26441
|
[
"MIT"
] | 24
|
2021-04-24T18:33:19.000Z
|
2021-11-13T14:59:54.000Z
|
examples/10_Example_DSSInterface.py
|
rodolfoplondero/py_dss_interface
|
cb6771b34ed322a5df7ef1cc194611e794f26441
|
[
"MIT"
] | 7
|
2020-08-15T12:56:04.000Z
|
2021-10-04T16:14:30.000Z
|
# -*- encoding: utf-8 -*-
"""
Example 10: exercises the integer, string and variant DSS interface wrappers
of py_dss_interface against the bundled example circuit "13" (presumably the
IEEE 13-bus feeder -- confirm against ExampleBase).

Created by Ênio Viana at 15/05/2021
"""
from py_dss_interface.models.Example.ExampleBase import ExampleBase
# Load the bundled "13" example case and keep its DSS interface handle.
dss = ExampleBase("13").dss
# Integer methods
print(45 * '=' + ' Integer Methods' + 45 * '=')
print(f'dss.dss_num_circuits(): {dss.dss_num_circuits()}')
print(f'dss.dss_clear_all(): {dss.dss_clear_all()}')
print(f'dss.dss_show_panel(): {dss.dss_show_panel()}')
print(f'dss.dss_start(): {dss.dss_start()}')
print(f'dss.dss_num_classes(): {dss.dss_num_classes()}')
print(f'dss.dss_num_user_classes(): {dss.dss_num_user_classes()}')
print(f'dss.dss_reset(): {dss.dss_reset()}')
print(f'dss.dss_read_allow_forms(): {dss.dss_read_allow_forms()}')
print(f'dss.dss_write_allow_forms(): {dss.dss_write_allow_forms(0)}')
print(f'dss.dss_read_allow_forms(): {dss.dss_read_allow_forms()}')
# String methods
print(45 * '=' + ' String Methods ' + 45 * '=')
print(f'dss.dss_new_circuit(): {dss.dss_new_circuit("new_rest_circuit")}')
print(f'dss.dss_version(): {dss.dss_version()}')
print(f'dss.dss_read_datapath(): {dss.dss_read_datapath()}')
# PAY ATTENTION: According with the OpenDSS original source there is no error here,
# NOTE(review): hard-coded absolute Windows path -- this only resolves on the
# original author's machine; adjust before running the example elsewhere.
dss.dss_write_datapath(r"C:\Users\eniocc\Desktop\epri_projects\fork\py_dss_interface\src\py_dss_interface\models"
                       r"\Capacitors\CapacitorsS.py")
print(f'dss.dss_read_datapath(): {dss.dss_read_datapath()}')
print(f'dss.dss_default_editor(): {dss.dss_default_editor()}')
# Variant methods
print(45 * '=' + ' Variant Methods ' + 45 * '=')
print(f'dss.dss_classes(): {dss.dss_classes()}')
print(f'dss.dss_user_classes(): {dss.dss_user_classes()}')
| 42.736842
| 113
| 0.716749
| 256
| 1,624
| 4.238281
| 0.285156
| 0.193548
| 0.141014
| 0.188018
| 0.352995
| 0.220277
| 0.162212
| 0.162212
| 0.162212
| 0.162212
| 0
| 0.016205
| 0.088054
| 1,624
| 37
| 114
| 43.891892
| 0.716408
| 0.116379
| 0
| 0.166667
| 0
| 0
| 0.692686
| 0.528129
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0.833333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
c38f8a137a8ab674b0f9ad3ffa00628b4cf57150
| 98
|
py
|
Python
|
matizla/helpers/types.py
|
neotje/matizla
|
23afbc9ad3972c04e5882e0fed2de1ce0b7f397b
|
[
"MIT"
] | null | null | null |
matizla/helpers/types.py
|
neotje/matizla
|
23afbc9ad3972c04e5882e0fed2de1ce0b7f397b
|
[
"MIT"
] | null | null | null |
matizla/helpers/types.py
|
neotje/matizla
|
23afbc9ad3972c04e5882e0fed2de1ce0b7f397b
|
[
"MIT"
] | null | null | null |
from webview.window import Window
class JSwindow:
    """Typed record describing a window as exchanged with the JS side.

    NOTE(review): presumably mirrors a subset of webview's Window state --
    confirm against the payload actually sent over the JS bridge.
    """
    # Window title text.
    title: str
    # Unique identifier for this window.
    uuid: str
    # Whether the window is currently hidden.
    hidden: bool
| 12.25
| 33
| 0.683673
| 13
| 98
| 5.153846
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265306
| 98
| 7
| 34
| 14
| 0.930556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
c39522539ddd16df4a65be551b45fae7ef2e1827
| 93
|
py
|
Python
|
WebApp/ImgSketch/apps.py
|
devanshsolani/sketchyourlife
|
ff9cec4eeb9fe3ebcb07432f9d12e6bcc10c4322
|
[
"BSD-3-Clause"
] | 1
|
2021-06-03T19:18:41.000Z
|
2021-06-03T19:18:41.000Z
|
WebApp/ImgSketch/apps.py
|
devanshsolani/sketchyourlife
|
ff9cec4eeb9fe3ebcb07432f9d12e6bcc10c4322
|
[
"BSD-3-Clause"
] | 1
|
2021-05-12T10:19:12.000Z
|
2021-05-12T10:19:12.000Z
|
WebApp/ImgSketch/apps.py
|
devanshsolani/sketchyourlife
|
ff9cec4eeb9fe3ebcb07432f9d12e6bcc10c4322
|
[
"BSD-3-Clause"
] | 4
|
2021-05-12T10:19:44.000Z
|
2021-07-03T07:57:31.000Z
|
from django.apps import AppConfig
class ImgSketchConfig(AppConfig):
    """Django application configuration for the ImgSketch app."""
    # Dotted-path label Django uses to register this application.
    name = 'ImgSketch'
| 15.5
| 33
| 0.763441
| 10
| 93
| 7.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 93
| 5
| 34
| 18.6
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
c3957d577ec1fa3898313456a867fe9f6ec2a40f
| 124
|
py
|
Python
|
plugins/docker/__init__.py
|
ajenti/ajen
|
177c1a67278a7763ed06eb2f773d7b409a85ec77
|
[
"MIT"
] | 3,777
|
2015-02-21T00:10:12.000Z
|
2022-03-30T15:33:22.000Z
|
plugins/docker/__init__.py
|
ajenti/ajen
|
177c1a67278a7763ed06eb2f773d7b409a85ec77
|
[
"MIT"
] | 749
|
2015-03-12T14:17:03.000Z
|
2022-03-25T13:22:28.000Z
|
plugins/docker/__init__.py
|
ajenti/ajen
|
177c1a67278a7763ed06eb2f773d7b409a85ec77
|
[
"MIT"
] | 687
|
2015-03-21T10:42:33.000Z
|
2022-03-21T23:18:12.000Z
|
import logging
from .main import ItemProvider
from .views import Handler
# ItemProvider and Handler are imported but unused here -- presumably exposed
# at package level / imported for their registration side effects so the
# plugin framework discovers them; confirm against Ajenti's plugin loader.
# Startup breadcrumb confirming the docker plugin package was imported.
logging.info('docker.__init__.py: docker loaded')
| 20.666667
| 49
| 0.806452
| 17
| 124
| 5.647059
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112903
| 124
| 5
| 50
| 24.8
| 0.872727
| 0
| 0
| 0
| 0
| 0
| 0.266129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c3b74f7c7a6c11b158f2affbdf969a225aae6118
| 344
|
py
|
Python
|
qt.py
|
DaelonSuzuka/qtenv
|
ee27d8989664aacd93ef1d8bb58ac6fa14418387
|
[
"MIT"
] | null | null | null |
qt.py
|
DaelonSuzuka/qtenv
|
ee27d8989664aacd93ef1d8bb58ac6fa14418387
|
[
"MIT"
] | null | null | null |
qt.py
|
DaelonSuzuka/qtenv
|
ee27d8989664aacd93ef1d8bb58ac6fa14418387
|
[
"MIT"
] | null | null | null |
import PySide2
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtNetwork import *
from PySide2.QtWebSockets import *
from PySide2.QtCharts import *
from PySide2.QtMultimedia import *
from PySide2.QtSerialPort import *
from PySide2.QtSql import *
| 31.272727
| 44
| 0.81686
| 44
| 344
| 6.386364
| 0.272727
| 0.391459
| 0.483986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036545
| 0.125
| 344
| 11
| 45
| 31.272727
| 0.89701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c3bc7b903f7d70f70f266720379cbbc77be71313
| 35
|
py
|
Python
|
ssh2net/core/juniper_junos/__init__.py
|
carlmontanari/ssh2net
|
55e969b6d44ec3f2bd2ebbd8dedd68b99bee4c5b
|
[
"MIT"
] | 10
|
2020-01-13T03:28:33.000Z
|
2022-02-08T17:05:59.000Z
|
ssh2net/core/juniper_junos/__init__.py
|
carlmontanari/ssh2net
|
55e969b6d44ec3f2bd2ebbd8dedd68b99bee4c5b
|
[
"MIT"
] | null | null | null |
ssh2net/core/juniper_junos/__init__.py
|
carlmontanari/ssh2net
|
55e969b6d44ec3f2bd2ebbd8dedd68b99bee4c5b
|
[
"MIT"
] | 1
|
2020-05-26T13:35:46.000Z
|
2020-05-26T13:35:46.000Z
|
"""ssh2net juniper junos driver"""
| 17.5
| 34
| 0.714286
| 4
| 35
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.114286
| 35
| 1
| 35
| 35
| 0.774194
| 0.8
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c3c096c6bf5140a9cca00f20e76eddbc5cb9115f
| 99,519
|
py
|
Python
|
spinsim/__init__.py
|
alexander-tritt-monash/spinsim
|
30a635464730e95c6e236931e3103ff6dcf119fe
|
[
"BSD-3-Clause"
] | 2
|
2021-11-09T08:45:42.000Z
|
2022-02-09T22:36:54.000Z
|
spinsim/__init__.py
|
alexander-tritt-monash/spinsim
|
30a635464730e95c6e236931e3103ff6dcf119fe
|
[
"BSD-3-Clause"
] | null | null | null |
spinsim/__init__.py
|
alexander-tritt-monash/spinsim
|
30a635464730e95c6e236931e3103ff6dcf119fe
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T10:28:50.000Z
|
2021-06-02T10:28:50.000Z
|
"""
"""
from enum import Enum
import numpy as np
import numba as nb
from numba import cuda
from numba import roc
import math
import cmath
# Precomputed square roots, reused when building the spin eigenstate bases.
sqrt2 = math.sqrt(2)
sqrt3 = math.sqrt(3)
class SpinQuantumNumber(Enum):
    """
    Options for the spin quantum number of a system.
    Parameters
    ----------
    value : :obj:`float`
        The numerical value of the spin quantum number.
    dimension : :obj:`int`
        Dimension of the hilbert space the states with this spin belong to.
    label : :obj:`str`
        A text label that can be used for archiving.
    plus_x, plus_y, plus_z, zero_x, zero_y, zero_z, minus_x, minus_y, minus_z : :obj:`numpy.ndarray` of :obj:`numpy.complex128`
        Eigenstates of the spin operators for quick reference.
    """
    def __init__(self, value:np.float64, dimension:int, label:str):
        # Enum members are defined as (value, dimension, label) tuples below;
        # this constructor unpacks them and precomputes the eigenstate vectors.
        super().__init__()
        self._value_ = value
        self.dimension = dimension
        self.label = label
        if self.label == "half":
            # Spin-half (2D Hilbert space): normalised eigenvectors of the
            # x, y and z spin projection operators. No zero_* states exist.
            self.plus_x = np.array([1, 1], np.complex128)/math.sqrt(2)
            self.minus_x = np.array([-1, 1], np.complex128)/math.sqrt(2)
            self.plus_y = np.array([1, 1j], np.complex128)/math.sqrt(2)
            self.minus_y = np.array([1, -1j], np.complex128)/math.sqrt(2)
            self.plus_z = np.array([1, 0], np.complex128)
            self.minus_z = np.array([0, 1], np.complex128)
        else:
            # Spin-one (3D Hilbert space): adds the zero_* eigenstates.
            self.plus_x = np.array([1, math.sqrt(2), 1], np.complex128)/2
            self.zero_x = np.array([-1, 0, 1], np.complex128)/math.sqrt(2)
            self.minus_x = np.array([1, -math.sqrt(2), 1], np.complex128)/2
            self.plus_y = np.array([-1, -1j*math.sqrt(2), 1], np.complex128)/2
            self.zero_y = np.array([1, 0, 1], np.complex128)/math.sqrt(2)
            self.minus_y = np.array([1, -1j*math.sqrt(2), 1], np.complex128)/2
            self.plus_z = np.array([1, 0, 0], np.complex128)
            self.zero_z = np.array([0, 1, 0], np.complex128)
            self.minus_z = np.array([0, 0, 1], np.complex128)
    HALF = (1/2, 2, "half")
    """
    For two level systems.
    """
    ONE = (1, 3, "one")
    """
    For three level systems.
    """
class IntegrationMethod(Enum):
    """
    Options for describing which method is used during the integration.
    Parameters
    ----------
    value : :obj:`str`
        A text label that can be used for archiving.
    """
    # Each member's value doubles as its archival label.
    MAGNUS_CF4 = "magnus_cf4"
    """
    Commutator free, fourth order Magnus based integrator.
    """
    EULER = "euler"
    """
    Euler integration method.
    """
    HEUN = "heun"
    """
    Integration method from AtomicPy.
    Makes two Euler integration steps, one sampling the field from the start of the time step, one sampling the field from the end of the time step.
    The equivalent of the trapezoidal method.
    """
class ExponentiationMethod(Enum):
    """
    The implementation to use for matrix exponentiation within the integrator.
    Parameters
    ----------
    value : :obj:`str`
        A text label that can be used for archiving.
    index : :obj:`int`
        A reference number, used when compiling the integrator, where higher level objects like enums cannot be interpreted.
    """
    def __init__(self, value:str, index:int):
        # Members are (label, index) tuples; `index` is the plain-int stand-in
        # for this enum inside compiled (jitted) code.
        super().__init__()
        self._value_ = value
        self.index = index
    ANALYTIC = ("analytic", 0)
    """
    Analytic expression of the matrix exponential.
    For spin-half :obj:`SpinQuantumNumber.HALF` systems only.
    See :obj:`Utilities.matrix_exponential_analytic()` for more information.
    """
    LIE_TROTTER = ("lie_trotter", 1)
    """
    Approximation using the Lie Trotter theorem, using the Pauli matrices and a single quadratic operator.
    See :obj:`Utilities.matrix_exponential_lie_trotter()` for more information.
    """
    LIE_TROTTER_8 = ("lie_trotter_8", 2)
    """
    Approximation using the Lie Trotter theorem, using all basis elements of su(3).
    For spin-one :obj:`SpinQuantumNumber.HALF` systems only.
    See :obj:`Utilities.matrix_exponential_lie_trotter_8()` for more information.
    """
class Device(Enum):
    """
    The target device that the integrator is being compiled for.
    .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
    .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
    .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html
    """
    def __init__(self, value:str, index:int):
        # Each member installs three decorator factories, selected by `value`:
        #   jit_host(template, max_registers) -> decorator for host entry points,
        #   jit_device(func)                  -> compile a device-side helper,
        #   jit_device_template(template)     -> decorator factory taking a
        #                                        numba signature template.
        # Callers can then decorate functions without branching on backend.
        super().__init__()
        self._value_ = value
        self.index = index
        if value == "python":
            # Pure-python mode: every decorator is an identity wrapper,
            # i.e. no compilation at all.
            def jit_host(template, max_registers):
                def jit_host(func):
                    return func
                return jit_host
            self.jit_host = jit_host
            def jit_device(func):
                return func
            self.jit_device = jit_device
            def jit_device_template(template):
                def jit_device_template(func):
                    return func
                return jit_device_template
            self.jit_device_template = jit_device_template
        elif value == "cpu_single":
            # Single-core CPU via numba.njit.
            # NOTE: max_registers is accepted but unused on CPU targets.
            def jit_host(template, max_registers):
                def jit_host(func):
                    return nb.njit(template)(func)
                return jit_host
            self.jit_host = jit_host
            def jit_device(func):
                return nb.njit()(func)
            self.jit_device = jit_device
            def jit_device_template(template):
                def jit_device_template(func):
                    return nb.njit(template)(func)
                return jit_device_template
            self.jit_device_template = jit_device_template
        elif value == "cpu":
            # Multi-core CPU: host functions get parallel = True; device
            # helpers are compiled the same as in cpu_single.
            def jit_host(template, max_registers):
                def jit_host(func):
                    return nb.njit(template, parallel = True)(func)
                return jit_host
            self.jit_host = jit_host
            def jit_device(func):
                return nb.njit()(func)
            self.jit_device = jit_device
            def jit_device_template(template):
                def jit_device_template(func):
                    return nb.njit(template)(func)
                return jit_device_template
            self.jit_device_template = jit_device_template
        elif value == "cuda":
            # Nvidia GPU: max_registers is forwarded to the kernel compile;
            # device helpers are compiled as inline device functions.
            def jit_host(template, max_registers):
                def jit_host(func):
                    return cuda.jit(template, debug = False, max_registers = max_registers)(func)
                return jit_host
            self.jit_host = jit_host
            def jit_device(func):
                return cuda.jit(device = True, inline = True)(func)
            self.jit_device = jit_device
            def jit_device_template(template):
                def jit_device_template(func):
                    return cuda.jit(template, device = True, inline = True)(func)
                return jit_device_template
            self.jit_device_template = jit_device_template
        elif value == "roc":
            # AMD ROCm GPU backend (flagged work-in-progress below).
            def jit_host(template, max_registers):
                def jit_host(func):
                    return roc.jit(template)(func)
                return jit_host
            self.jit_host = jit_host
            def jit_device(func):
                return roc.jit(device = True)(func)
            self.jit_device = jit_device
            def jit_device_template(template):
                def jit_device_template(func):
                    return roc.jit(template, device = True)(func)
                return jit_device_template
            self.jit_device_template = jit_device_template
    PYTHON = ("python", 0)
    """
    Use pure python interpreted code for the integrator, ie, don't compile the integrator.
    """
    CPU_SINGLE = ("cpu_single", 0)
    """
    Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on a single CPU core.
    .. note ::
        To use this device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.
    """
    CPU = ("cpu", 0)
    """
    Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on all CPU cores, in parallel.
    .. note ::
        To use this device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.
    """
    CUDA = ("cuda", 1)
    """
    Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on an Nvidia cuda compatible GPU, in parallel.
    .. note ::
        To use this device option, the user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python features.
    """
    ROC = ("roc", 2)
    """
    Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on an AMD ROCm compatible GPU, in parallel.
    .. warning ::
        Work in progress, not currently functional!
    """
class Results:
    """
    Container for the output of one integrator evaluation.

    Attributes
    ----------
    time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)
        The times that `state` was evaluated at.
    time_evolution : :obj:`numpy.ndarray` (time_index, y_index, x_index)
        The evaluated time evolution operator between each time step.
    state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
        The quantum state over time, in the z projection eigenbasis.
    spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)
        The expected spin projection (Bloch vector) over time. Computed
        lazily on first access via `spin_calculator`, then cached.
    spin_calculator : :obj:`callable`
        Maps a `state` time series to its spin projection time series; only
        invoked the first time `spin` is read.
    """
    def __init__(self, time:np.ndarray, time_evolution:np.ndarray, state:np.ndarray, spin_calculator:callable):
        """
        Parameters
        ----------
        time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)
            The times that `state` was evaluated at.
        time_evolution : :obj:`numpy.ndarray` (time_index, y_index, x_index)
            The evaluated time evolution operator between each time step.
        state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
            The quantum state over time, in the z projection eigenbasis.
        spin_calculator : :obj:`callable`
            Maps a `state` time series to its spin projection time series;
            deferred until `spin` is first accessed.
        """
        self.spin_calculator = spin_calculator
        self.state = state
        self.time_evolution = time_evolution
        self.time = time
    def __getattr__(self, attr_name:str) -> np.ndarray:
        # Only `spin` is synthesized lazily; any other miss is a real error.
        if attr_name != "spin":
            raise AttributeError("{} has no attribute called {}.".format(self, attr_name))
        # First access: compute, cache on the instance (so __getattr__ is not
        # consulted again for `spin`), then return through the cached slot.
        computed = self.spin_calculator(self.state)
        setattr(self, attr_name, computed)
        return self.spin
class Simulator:
"""
Attributes
----------
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin-half :obj:`SpinQuantumNumber.HALF`, or spin-one :obj:`SpinQuantumNumber.ONE` quantum system.
threads_per_block : :obj:`int`
The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`).
Defaults to 64.
Modifying might be able to increase execution time for different GPU models.
device : :obj:`Device`
The option to select which device will be targeted for integration.
That is, whether the integrator is compiled for a CPU or GPU.
Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise.
See :obj:`Device` for all options and more details.
number_of_threads : :obj:`int`
The number of CPU threads to use when running on a CPU device.
get_time_evolution : :obj:`callable`
The internal function for evaluating the time evolution operator in parallel. Compiled for chosen device on object construction.
Parameters:
* **sweep_parameters** (:obj:`numpy.ndarray` of :obj:`numpy.float64`) - The input to the :obj:`get_field()` function supplied by the user. Modifies the field function so the integrator can be used for many experiments, without the need for slow recompilation. For example, if the `sweep_parameters` is used to define the bias field strength in :obj:`get_field()`, then one can run many simulations, sweeping through bias values, by calling this method multiple times, each time varying `sweep_parameters`.
* **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at.
* **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the experiment is to start at, and the time that the experiment is to finish at. Measured in s.
* **time_step_integration** (:obj:`float`) - The integration time step. Measured in s.
* **time_step_output** (:obj:`float`) - The sample resolution of the output timeseries for the state. Must be a whole number multiple of `time_step_integration`. Measured in s.
* **time_evolution_output** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
spin_calculator : :obj:`callable`
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state.
This :obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just in time if the `spin` property is needed.
Compiled for chosen device on object construction.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
def __init__(self, get_field:callable, spin_quantum_number:SpinQuantumNumber, device:Device = None, exponentiation_method:ExponentiationMethod = None, use_rotating_frame:bool = True, integration_method:IntegrationMethod = IntegrationMethod.MAGNUS_CF4, number_of_squares:int = 24, threads_per_block:int = 64, max_registers:int = None, number_of_threads:int = None):
"""
.. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm
Parameters
----------
get_field : :obj:`callable`
A python function that describes the field that the spin system is being put under. It must have three arguments:
* **time_sample** (:obj:`float`) - the time to sample the field at, in units of s.
* **simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple simulations need to be run. For example, it is used to sweep over dressing frequencies during the simulations that `spinsim` was designed for.
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is a four dimensional vector, with the first three entries being x, y, z spatial directions (to model a magnetic field, for example), and the fourth entry being the amplitude of the quadratic shift (only appearing, and required, in spin-one systems).
.. note::
This function must be compilable for the device that the integrator is being compiled for. See :class:`Device` for more information and links.
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin-half :obj:`SpinQuantumNumber.HALF`, or spin-one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration.
That is, whether the integrator is compiled for a CPU or GPU.
Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise.
See :obj:`Device` for all options and more details.
exponentiation_method : :obj:`ExponentiationMethod`
Which method to use for matrix exponentiation in the integration algorithm.
Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`.
See :obj:`ExponentiationMethod` for more details.
use_rotating_frame : :obj:`bool`
Whether or not to use the rotating frame optimisation.
Defaults to :obj:`True`.
If set to :obj:`True`, the integrator moves into a frame rotating in the z axis by an amount defined by the field in the z direction.
This removes the (possibly large) z component of the field, which increases the accuracy of the output since the integrator will on average take smaller steps.
.. note ::
The use of a rotating frame is commonly associated with the use of a rotating wave approximation, a technique used to get approximate analytic solutions of spin system dynamics.
This is not done when this option is set to :obj:`True` - no such approximations are made, and the output state in given out of the rotating frame.
One can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using the rating wave approximation: just define :obj:`get_field()` with field functions that use the rotating wave approximation in the rotating frame.
integration_method : :obj:`IntegrationMethod`
Which integration method to use in the integration.
Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`.
See :obj:`IntegrationMethod` for more details.
number_of_squares : :obj:`int`
The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.
threads_per_block : :obj:`int`
The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`).
Defaults to 64.
Modifying might be able to increase execution time for different GPU models.
max_registers : :obj:`int`
The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target device, and can be modified to increase the execution speed for a specific GPU model.
Raising this value allocates more registers (fast memory) to each thread, out of a maximum number for the whole GPU, for each specific GPU model.
This means that if more registers are allocated than are available for the GPU model, the GPU must run fewer threads concurrently than it has Cuda cores, meaning some cores are inactive, and the GPU is said to have less occupancy.
Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer resgiters being avaliable to each thread, meaning slower memory must be used.
Thus, there will be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this value could increase performance for your GPU.
See `Achieved Occupancy`_ for Nvidia's official explanation.
number_of_threads : :obj:`int`
The number of CPU threads to use when running on a CPU device.
"""
if not device:
if cuda.is_available():
device = Device.CUDA
else:
device = Device.CPU
self.threads_per_block = threads_per_block
self.spin_quantum_number = spin_quantum_number
self.device = device
self.number_of_threads = number_of_threads
self.get_time_evolution = None
try:
self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, number_of_squares, threads_per_block, max_registers)
except:
print("\033[31mspinsim error!!!\nnumba could not jit get_field() function into a device function.\033[0m\n")
raise
def compile_time_evolver(self, get_field:callable, spin_quantum_number:SpinQuantumNumber, device:Device, use_rotating_frame:bool = True, integration_method:IntegrationMethod = IntegrationMethod.MAGNUS_CF4, exponentiation_method:ExponentiationMethod = None, number_of_squares:int = 24, threads_per_block:int = 64, max_registers:int = None):
"""
Compiles the integrator and spin calculation functions of the simulator.
Parameters
----------
get_field : :obj:`callable`
A python function that describes the field that the spin system is being put under.
It must have three arguments:
* **time_sample** (:obj:`float`) - the time to sample the field at, in units of s.
* **sweep_parameters** (:obj:`numpy.ndarray` of :obj:`numpy.float64`) - an array of parameters that can be swept over when multiple simulations need to be run. For example, it is used to sweep over dressing frequencies during the magnetometry experiments that `spinsim` was designed for.
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is a four dimensional vector, with the first three entries being x, y, z spatial directions (to model a magnetic field, for example), and the fourth entry being the amplitude of the quadratic shift (only appearing, and required, in spin-one systems).
.. note::
This function must be compilable for the device that the integrator is being compiled for.
See :class:`Device` for more information and links.
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin-half :obj:`SpinQuantumNumber.HALF`, or spin-one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration.
That is, whether the integrator is compiled for a CPU or GPU.
Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise.
See :obj:`Device` for all options and more details.
exponentiation_method : :obj:`ExponentiationMethod`
Which method to use for matrix exponentiation in the integration algorithm.
Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`.
See :obj:`ExponentiationMethod` for more details.
use_rotating_frame : :obj:`bool`
Whether or not to use the rotating frame optimisation.
Defaults to :obj:`True`.
If set to :obj:`True`, the integrator moves into a frame rotating in the z axis by an amount defined by the field in the z direction.
This removes the (possibly large) z component of the field, which increases the accuracy of the output since the integrator will on average take smaller steps.
.. note ::
The use of a rotating frame is commonly associated with the use of a rotating wave approximation, a technique used to get approximate analytic solutions of spin system dynamics.
This is not done when this option is set to :obj:`True` - no such approximations are made, and the output state in given out of the rotating frame.
One can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using the rating wave approximation: just define :obj:`get_field()` with field functions that use the rotating wave approximation in the rotating frame.
integration_method : :obj:`IntegrationMethod`
Which integration method to use in the integration.
Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`.
See :obj:`IntegrationMethod` for more details.
number_of_squares : :obj:`int`
The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.
threads_per_block : :obj:`int`
The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`).
Defaults to 64.
Modifying might be able to increase execution time for different GPU models.
max_registers : :obj:`int`
The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target device, and can be modified to increase the execution speed for a specific GPU model.
Defaults to 63 (optimal for GTX1070, the device used for testing.
Note that one extra register per thread is always added to the number specified for control, so really this number is 64).
Raising this value allocates more registers (fast memory) to each thread, out of a maximum number for the whole GPU, for each specific GPU model.
This means that if more registers are allocated than are available for the GPU model, the GPU must run fewer threads concurrently than it has Cuda cores, meaning some cores are inactive, and the GPU is said to have less occupancy.
Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer registers being avaliable to each thread, meaning slower memory must be used.
Thus, there will be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this value could increase performance for your GPU.
See `Achieved Occupancy`_ for Nvidia's official explanation.
"""
utilities = Utilities(spin_quantum_number, device, threads_per_block, number_of_squares)
conj = utilities.conj
set_to = utilities.set_to
set_to_one = utilities.set_to_one
matrix_multiply = utilities.matrix_multiply
matrix_exponential_analytic = utilities.matrix_exponential_analytic
matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter
matrix_exponential_lie_trotter_8 = utilities.matrix_exponential_lie_trotter_8
jit_host = device.jit_host
jit_device = device.jit_device
jit_device_template = device.jit_device_template
device_index = device.index
if not exponentiation_method:
if spin_quantum_number == SpinQuantumNumber.ONE:
exponentiation_method = ExponentiationMethod.LIE_TROTTER
elif spin_quantum_number == SpinQuantumNumber.HALF:
exponentiation_method = ExponentiationMethod.ANALYTIC
if integration_method == IntegrationMethod.MAGNUS_CF4:
sample_index_max = 3
sample_index_end = 4
elif integration_method == IntegrationMethod.HEUN:
sample_index_max = 3
sample_index_end = 4
elif integration_method == IntegrationMethod.EULER:
sample_index_max = 1
sample_index_end = 1
exponentiation_method_index = exponentiation_method.index
dimension = spin_quantum_number.dimension
if spin_quantum_number == SpinQuantumNumber.HALF:
lie_dimension = 3
elif spin_quantum_number == SpinQuantumNumber.ONE:
if exponentiation_method == ExponentiationMethod.LIE_TROTTER:
lie_dimension = 4
elif exponentiation_method == ExponentiationMethod.LIE_TROTTER_8:
lie_dimension = 8
if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF):
print("\033[31mspinsim warning!!!\n_attempting to use an analytic exponentiation method outside of spin-half. Switching to a Lie Trotter method.\033[0m")
exponentiation_method = ExponentiationMethod.LIE_TROTTER
exponentiation_method_index = 1
@jit_device_template("(float64[:], complex128[:, :], complex128[:, :])")
def append_exponentiation(field_sample, time_evolution_fine, time_evolution_output):
if device_index == 0:
time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128)
elif device_index == 1:
time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128)
elif device_index == 2:
time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)
time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :]
# Calculate the exponential
if exponentiation_method_index == 0:
matrix_exponential_analytic(field_sample, time_evolution_fine)
elif exponentiation_method_index == 1:
matrix_exponential_lie_trotter(field_sample, time_evolution_fine)
elif exponentiation_method_index == 2:
matrix_exponential_lie_trotter_8(field_sample, time_evolution_fine)
# Premultiply to the exitsing time evolution operator
set_to(time_evolution_output, time_evolution_old)
matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_output)
if use_rotating_frame:
if dimension == 3:
if exponentiation_method_index == 2:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_one_rotating_8(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])*conj(rotating_wave_winding)
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - rotating_wave
X = (field_sample[4] + 1j*field_sample[5])*conj(rotating_wave_winding)*conj(rotating_wave_winding)
field_sample[4] = X.real
field_sample[5] = X.imag
X = (field_sample[6] + 1j*field_sample[7])*conj(rotating_wave_winding)
field_sample[6] = X.real
field_sample[7] = X.imag
transform_frame = transform_frame_spin_one_rotating_8
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - rotating_wave
transform_frame = transform_frame_spin_one_rotating
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2)
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - 2*rotating_wave
transform_frame = transform_frame_spin_half_rotating
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding):
return
transform_frame = transform_frame_lab
get_field_jit = jit_device_template("(float64, float64[:], float64[:])")(get_field)
if integration_method == IntegrationMethod.MAGNUS_CF4:
@jit_device_template("(float64[:], float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_magnus_cf4(sweep_parameters, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse)
rotating_wave_winding[0] = cmath.exp(1j*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameters, field_sample[0, :])
time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse)
rotating_wave_winding[1] = cmath.exp(1j*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameters, field_sample[1, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_output, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])
w0 = (1.5 + sqrt3)/6
w1 = (1.5 - sqrt3)/6
field_sample[2, 0] = time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0])
field_sample[2, 1] = time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1])
field_sample[2, 2] = time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2])
if dimension > 2:
field_sample[2, 3] = time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3])
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_output)
field_sample[2, 0] = time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0])
field_sample[2, 1] = time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1])
field_sample[2, 2] = time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2])
if dimension > 2:
field_sample[2, 3] = time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3])
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_output)
get_field_integration = get_field_integration_magnus_cf4
append_exponentiation_integration = append_exponentiation_integration_magnus_cf4
elif integration_method == IntegrationMethod.HEUN:
@jit_device_template("(float64[:], float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_heun(sweep_parameters, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = time_fine - time_coarse
rotating_wave_winding[0] = cmath.exp(1j*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameters, field_sample[0, :])
time_sample = time_fine + time_step_integration - time_coarse
rotating_wave_winding[1] = cmath.exp(1j*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameters, field_sample[1, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_heun(time_evolution_fine, time_evolution_output, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])
field_sample[2, 0] = time_step_integration*field_sample[0, 0]/2
field_sample[2, 1] = time_step_integration*field_sample[0, 1]/2
field_sample[2, 2] = time_step_integration*field_sample[0, 2]/2
if dimension > 2:
field_sample[2, 3] = time_step_integration*field_sample[0, 3]/2
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_output)
field_sample[2, 0] = time_step_integration*field_sample[1, 0]/2
field_sample[2, 1] = time_step_integration*field_sample[1, 1]/2
field_sample[2, 2] = time_step_integration*field_sample[1, 2]/2
if dimension > 2:
field_sample[2, 3] = time_step_integration*field_sample[1, 3]/2
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_output)
get_field_integration = get_field_integration_heun
append_exponentiation_integration = append_exponentiation_integration_heun
elif integration_method == IntegrationMethod.EULER:
@jit_device_template("(float64[:], float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_euler(sweep_parameters, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = time_fine + 0.5*time_step_integration - time_coarse
rotating_wave_winding[0] = cmath.exp(1j*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameters, field_sample[0, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_euler(time_evolution_fine, time_evolution_output, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
field_sample[0, 0] = time_step_integration*field_sample[0, 0]
field_sample[0, 1] = time_step_integration*field_sample[0, 1]
field_sample[0, 2] = time_step_integration*field_sample[0, 2]
if dimension > 2:
field_sample[0, 3] = time_step_integration*field_sample[0, 3]
append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_output)
get_field_integration = get_field_integration_euler
append_exponentiation_integration = append_exponentiation_integration_euler
@jit_device_template("(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64[:])")
def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_output, sweep_parameters):
# Declare variables
if device_index == 0:
time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128)
field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64)
rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128)
elif device_index == 1:
time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128)
field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64)
rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128)
elif device_index == 2:
time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)
time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :]
field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64)
field_sample = field_sample_group[roc.get_local_id(1), :, :]
rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128)
rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :]
time_coarse[time_index] = time_end_points[0] + time_step_output*time_index
time_fine = time_coarse[time_index]
# Initialise time evolution operator to 1
set_to_one(time_evolution_output[time_index, :])
field_sample[0, 2] = 0
if use_rotating_frame:
time_sample = time_coarse[time_index] + time_step_output/2
get_field_jit(time_sample, sweep_parameters, field_sample[0, :])
rotating_wave = field_sample[0, 2]
if dimension == 2:
rotating_wave /= 2
# For every fine step
for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)):
get_field_integration(sweep_parameters, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding)
append_exponentiation_integration(time_evolution_fine, time_evolution_output[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding)
time_fine += time_step_integration
# Take out of rotating frame
if use_rotating_frame:
rotating_wave_winding[0] = cmath.exp(1j*rotating_wave*time_step_output)
time_evolution_output[time_index, 0, 0] /= rotating_wave_winding[0]
time_evolution_output[time_index, 0, 1] /= rotating_wave_winding[0]
if dimension > 2:
time_evolution_output[time_index, 0, 2] /= rotating_wave_winding[0]
time_evolution_output[time_index, 2, 0] *= rotating_wave_winding[0]
time_evolution_output[time_index, 2, 1] *= rotating_wave_winding[0]
time_evolution_output[time_index, 2, 2] *= rotating_wave_winding[0]
else:
time_evolution_output[time_index, 1, 0] *= rotating_wave_winding[0]
time_evolution_output[time_index, 1, 1] *= rotating_wave_winding[0]
@jit_host("(float64[:], float64[:], float64[:], float64, float64, complex128[:, :, :])", max_registers)
def get_time_evolution(sweep_parameters, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_output):
"""
Find the stepwise time evolution opperator.
Parameters
----------
sweep_parameters : :obj:`numpy.ndarray` of :obj:`numpy.float64`
time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index)
A coarse grained list of time samples that the time evolution operator is found for.
In units of s.
This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1))
The time values for when the experiment is to start and finishes.
In units of s.
time_step_integration : :obj:`float`
The time step used within the integration algorithm.
In units of s.
time_step_output : :obj:`float`
The time difference between each element of `time_coarse`.
In units of s.
Determines the sample rate of the outputs `time_coarse` and `time_evolution_output`.
time_evolution_output : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)
Time evolution operator (matrix) between the current and next timesteps, for each time sampled.
See :math:`U(t)` in :ref:`overview_of_simulation_method`.
This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
"""
if device_index == 0:
for time_index in nb.prange(time_coarse.size):
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_output, sweep_parameters)
elif device_index == 1:
# Run calculation for each coarse timestep in parallel
time_index = cuda.grid(1)
if time_index < time_coarse.size:
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_output, sweep_parameters)
elif device_index == 2:
# Run calculation for each coarse timestep in parallel
time_index = roc.get_global_id(1)
if time_index < time_coarse.size:
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_output, sweep_parameters)
return
@jit_host("(complex128[:, :], float64[:, :])", max_registers = max_registers)
def get_spin(state, spin):
"""
Calculate each expected spin value in parallel.
For spin-half:
.. math::
\\begin{align*}
\\langle F\\rangle(t) = \\begin{pmatrix}
\\Re(\\psi_{+\\frac{1}{2}}(t)\\psi_{-\\frac{1}{2}}(t)^*)\\\\
-\\Im(\\psi_{+\\frac{1}{2}}(t)\\psi_{-\\frac{1}{2}}(t)^*)\\\\
\\frac{1}{2}(|\\psi_{+\\frac{1}{2}}(t)|^2 - |\\psi_{-\\frac{1}{2}}(t)|^2)
\\end{pmatrix}
\\end{align*}
For spin-one:
.. math::
\\begin{align*}
\\langle F\\rangle(t) = \\begin{pmatrix}
\\Re(\\sqrt{2}\\psi_{0}(t)^*(\\psi_{+1}(t) + \\psi_{-1}(t))\\\\
-\\Im(\\sqrt{2}\\psi_{0}(t)^*(\\psi_{+1}(t) - \\psi_{-1}(t))\\\\
|\\psi_{+1}(t)|^2 - |\\psi_{-1}(t)|^2
\\end{pmatrix}
\\end{align*}
Parameters
----------
state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)
The state (wavefunction) of the spin system in the lab frame, for each time sampled.
See :math:`\\psi(t)` in :ref:`overview_of_simulation_method`.
spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index)
The expected value for hyperfine spin of the spin system in the lab frame, for each time sampled.
Units of :math:`\\hbar`.
This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
"""
if device_index == 0:
for time_index in nb.prange(spin.shape[0]):
if dimension == 2:
spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)
else:
spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real
spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real
spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2
elif device_index > 0:
if device_index == 1:
time_index = cuda.grid(1)
elif device_index == 1:
time_index = roc.get_global_id(1)
if time_index < spin.shape[0]:
if dimension == 2:
spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)
else:
spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real
spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real
spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2
return
def spin_calculator(state):
    """
    Evaluates the expected spin projection (Bloch vector) for every sample of a state timeseries.

    Parameters
    ----------
    state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
        The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.

    Returns
    -------
    spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)
        The expected spin projection (Bloch vector) over time.
    """
    output_shape = (state.shape[0], 3)
    if device.index == 0:
        # Host execution: call the kernel directly on numpy arrays.
        spin = np.empty(output_shape, np.float64)
        get_spin(state, spin)
    elif device == Device.CUDA:
        # Launch one cuda thread per time sample, then pull the result back.
        spin = cuda.device_array(output_shape, np.float64)
        grid_size = (state.shape[0] + threads_per_block - 1) // threads_per_block
        get_spin[grid_size, threads_per_block](cuda.to_device(state), spin)
        spin = spin.copy_to_host()
    elif device == Device.ROC:
        # Same launch strategy for the roc backend.
        spin = roc.device_array(output_shape, np.float64)
        grid_size = (state.shape[0] + threads_per_block - 1) // threads_per_block
        get_spin[grid_size, threads_per_block](roc.to_device(state), spin)
        spin = spin.copy_to_host()
    return spin
self.get_time_evolution = get_time_evolution
self.spin_calculator = spin_calculator
def evaluate(self, time_start:np.float64, time_end:np.float64, time_step_integration:np.float64, time_step_output:np.float64, state_init:np.ndarray, sweep_parameters:np.ndarray = [0]) -> Results:
    """
    Integrates the time dependent Schroedinger equation and returns the quantum state of the spin system over time.

    Parameters
    ----------
    time_start : :obj:`float`
        The time offset that the experiment is to start at.
        Measured in s.
    time_end : :obj:`float`
        The time that the experiment is to finish at.
        Measured in s.
        The duration of the experiment is `time_end - time_start`.
    time_step_integration : :obj:`float`
        The integration time step.
        Measured in s.
        If not a whole number divisor of `time_step_output`, it is snapped to the closest one (with a printed warning).
    time_step_output : :obj:`float`
        The sample resolution of the output timeseries for the state.
        Measured in s.
    state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number)
        The initial quantum state of the spin system, written in terms of the eigenstates of the spin projection operator in the z direction.
    sweep_parameters : :obj:`numpy.ndarray` of :obj:`numpy.float64`
        The input to the :obj:`get_field()` function supplied by the user.
        Modifies the field function so the integrator can be used for many experiments, without the need for slow recompilation.
        For example, if the `sweep_parameters` is used to define the bias field strength in :obj:`get_field()`, then one can run many simulations, sweeping through bias values, by calling this method multiple times, each time varying `sweep_parameters`.
        NOTE(review): the default is a mutable list literal; it is never mutated here (it is immediately copied by np.asarray below), so this is harmless, but a tuple default would be safer.

    Returns
    -------
    results : :obj:`Results`
        An object containing the results of the simulation.
    """
    # Snap the integration step to the closest whole-number divisor of the
    # output step, warning only if this actually changed the requested value.
    time_step_integration_old = time_step_integration
    time_step_integration = time_step_output/round(max(time_step_output/time_step_integration, 1))
    if math.fabs(time_step_output/time_step_integration_old - round(time_step_output/time_step_integration_old)) > 1e-6:
        print(f"\033[33mspinsim warning!!!\ntime_step_output ({time_step_output:8.4e}) not an integer multiple of time_step_integration ({time_step_integration_old:8.4e}). Resetting time_step_integration to {time_step_integration:8.4e}.\033[0m\n")

    # Normalise the inputs into the dtypes the compiled kernels expect.
    time_end_points = np.asarray([time_start, time_end], np.float64)
    state_init = np.asarray(state_init, np.complex128)
    sweep_parameters = np.asarray(sweep_parameters, np.float64)

    # Number of output samples over the experiment duration.
    time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output)
    if self.device.index == 0:
        # Host execution (plain python or cpu-jitted kernels).
        if self.device == Device.CPU:
            if self.number_of_threads:
                # Temporarily override numba's thread count for this run.
                old_threads = nb.get_num_threads()
                nb.set_num_threads(self.number_of_threads)
        time = np.empty(time_index_max, np.float64)
        time_evolution_output = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
        self.get_time_evolution(sweep_parameters, time, time_end_points, time_step_integration, time_step_output, time_evolution_output)
        if self.device == Device.CPU:
            if self.number_of_threads:
                # Restore the thread count saved above.
                nb.set_num_threads(old_threads)
    elif self.device == Device.CUDA:
        try:
            # Allocate outputs on the device and launch one thread per sample.
            time = cuda.device_array(time_index_max, np.float64)
            time_evolution_output = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
            sweep_parameters_device = cuda.to_device(sweep_parameters)
            blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block
            self.get_time_evolution[blocks_per_grid, self.threads_per_block](sweep_parameters_device, time, time_end_points, time_step_integration, time_step_output, time_evolution_output)
        except:
            # Compilation/launch failure: report, then propagate the original error.
            print("\033[31mspinsim error!!!\nnumba.cuda could not jit get_field() function into a cuda device function.\033[0m\n")
            raise
        time_evolution_output = time_evolution_output.copy_to_host()
        time = time.copy_to_host()
    elif self.device == Device.ROC:
        try:
            # Same strategy for the roc (AMD) backend.
            time = roc.device_array(time_index_max, np.float64)
            time_evolution_output = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
            sweep_parameters_device = roc.to_device(sweep_parameters)
            blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block
            self.get_time_evolution[blocks_per_grid, self.threads_per_block](sweep_parameters_device, time, time_end_points, time_step_integration, time_step_output, time_evolution_output)
        except:
            print("\033[31mspinsim error!!!\nnumba.roc could not jit get_field() function into a roc device function.\033[0m\n")
            raise
        time_evolution_output = time_evolution_output.copy_to_host()
        time = time.copy_to_host()

    # Accumulate the stepwise evolution operators into the state timeseries.
    state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128)
    self.get_state(state_init, state, time_evolution_output)
    results = Results(time, time_evolution_output, state, self.spin_calculator)
    return results
@staticmethod
@nb.njit
def get_state(state_init:np.ndarray, state:np.ndarray, time_evolution:np.ndarray):
    """
    Propagate the initial state through the stepwise time evolution operators,
    filling in the state timeseries sample by sample.

    Parameters
    ----------
    state_init : :class:`numpy.ndarray` of :class:`numpy.complex128`
        The state (spin wavefunction) of the system at the start of the simulation.
    state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)
        The state (wavefunction) of the spin system in the lab frame, for each time sampled.
    time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)
        The evaluated time evolution operator between each time step.
        See :ref:`architecture` for some information.
    """
    for step_index in range(state.shape[0]):
        # Components of a single sample are independent of one another.
        for bra_index in nb.prange(state.shape[1]):
            amplitude = 0
            if step_index == 0:
                # The first sample is the supplied initial state.
                amplitude += state_init[bra_index]
            else:
                # state[t] = U[t - 1] @ state[t - 1]
                for ket_index in range(state.shape[1]):
                    amplitude += time_evolution[step_index - 1, bra_index, ket_index]*state[step_index - 1, ket_index]
            state[step_index, bra_index] = amplitude
class Utilities:
"""
An object that contains definitions of all of the device functions (functions compiled for use on the target device) used in the integrator.
These device functions are compiled for the chosen target device on construction of the object.
Attributes
----------
conj(z) : :obj:`callable`
Conjugate of a complex number.
.. math::
\\begin{align*}
(a + ib)^* &= a - ib\\\\
a, b &\\in \\mathbb{R}
\\end{align*}
Parameters:
* **z** (:class:`numpy.complex128`) - The complex number to take the conjugate of.
Returns
* **cz** (:class:`numpy.complex128`) - The conjugate of z.
expm1i(b) : :obj:`callable`
For real input :math:`b`, returns :math:`\\exp(ib) - 1`, while avoiding floating point cancellation errors.
Parameters:
* **b** (:class:`numpy.float64`) - The imaginary component to exponentiate.
Returns
* **em1i** (:class:`numpy.complex128`) - The evaluated output.
cos_exp_m1(a, b) : :obj:`callable`
For real input :math:`a`, :math:`b`, returns :math:`\\cos(a)\\exp(ib) - 1`, while avoiding floating point cancellation errors.
Parameters:
* **a** (:class:`numpy.float64`) - The real component to take the cosine of.
* **b** (:class:`numpy.float64`) - The imaginary component to exponentiate.
Returns
* **cem1** (:class:`numpy.complex128`) - The evaluated output.
cos_m1(a) : :obj:`callable`
For real input :math:`a`, returns :math:`\\cos(a) - 1`, while avoiding floating point cancellation errors.
Parameters:
* **a** (:class:`numpy.float64`) - The real component to take the cosine of.
Returns
* **cm1** (:class:`numpy.float64`) - The evaluated output.
set_to(operator, result) : :obj:`callable`
Copy the contents of one matrix into another.
.. math::
(A)_{i, j} = (B)_{i, j}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to.
set_to_one(operator) : :obj:`callable`
Make a matrix the multiplicative identity, ie, :math:`1`.
.. math::
\\begin{align*}
(A)_{i, j} &= \\delta_{i, j}\\\\
&= \\begin{cases}
1,&i = j\\\\
0,&i\\neq j
\\end{cases}
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`.
set_to_zero(operator) : :obj:`callable`
Make a matrix the zero matrix.
.. math::
\\begin{align*}
(A)_{i, j} &= 0
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`.
matrix_multiply(left, right, result) : :obj:`callable`
Multiply matrices left and right together, to be returned in result.
.. math::
\\begin{align*}
(LR)_{i,k} = \\sum_j (L)_{i,j} (R)_{j,k}
\\end{align*}
Parameters:
* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by.
* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the result of the product.
matrix_square_m1(operator, result) : :obj:`callable`
For matrix :math:`A = 1 + a` :math:`S = A^2 = 1 + s`.
Here the input is the residuals :math:`a`, and the output is :math:`s`.
This is a way to evaluate :math:`s` without floating point cancellation error.
Specifically,
.. math::
\\begin{align*}
s &= S - 1\\\\
&= A^2 - 1\\\\
&= (2\\cdot 1 + a)a
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The residual of the matrix to square.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the residual of the result of the product.
matrix_multiply_m1(left, right, result) : :obj:`callable`
For matrices :math:`L = 1 + l` and :math:`R = 1 + r`, evaluates :math:`O = LR = 1 + o`.
Here the inputs are the residuals :math:`l` and :math:`r`, and the output is :math:`o`.
This is a way to evaluate :math:`o` without floating point cancellation error.
Specifically,
.. math::
\\begin{align*}
o &= O - 1\\\\
&= LR - 1\\\\
&= l + r + lr
\\end{align*}
Parameters:
* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The residual of the matrix to left multiply by.
* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The residual of the matrix to right multiply by.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the residual of the result of the product.
matrix_exponential_analytic(field_sample, result) : :obj:`callable`
Calculates a :math:`\\mathfrak{su}(2)` matrix exponential based on its analytic form.
.. warning::
Only available for use with spin-half systems.
Will not work with spin-one systems.
Assumes the exponent is an imaginary linear combination of :math:`\\mathfrak{su}(2)`, being,
.. math::
\\begin{align*}
A &= -i(\\omega_x J_x + \\omega_y J_y + \\omega_z J_z),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{2}\\begin{pmatrix}
0 & 1 \\\\
1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{2}\\begin{pmatrix}
0 & -i \\\\
i & 0
\\end{pmatrix},&
J_z &= \\frac{1}{2}\\begin{pmatrix}
1 & 0 \\\\
0 & -1
\\end{pmatrix}
\\end{align*}
Then the exponential can be calculated as
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z)\\\\
&= \\begin{pmatrix}
\\cos(\\frac{\\omega_r}{2}) - i\\frac{\\omega_z}{\\omega_r}\\sin(\\frac{\\omega_r}{2}) & -\\frac{\\omega_y + i\\omega_x}{\\omega_r}\\sin(\\frac{\\omega_r}{2})\\\\
\\frac{\\omega_y - i\\omega_x}{\\omega_r}\\sin(\\frac{\\omega_r}{2}) & \\cos(\\frac{\\omega_r}{2}) + i\\frac{\\omega_z}{\\omega_r}\\sin(\\frac{\\omega_r}{2})
\\end{pmatrix}
\\end{align*}
with :math:`\\omega_r = \\sqrt{\\omega_x^2 + \\omega_y^2 + \\omega_z^2}`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of :math:`\\omega_x`, :math:`\\omega_y` and :math:`\\omega_z` respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable`
Calculates a matrix exponential based on the Lie Product Formula,
.. math::
\\exp(A + B) = \\lim_{c \\to \\infty} \\left(\\exp\\left(\\frac{1}{c}A\\right) \\exp\\left(\\frac{1}{c}B\\right)\\right)^c.
**For spin-half systems:**
Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\mathfrak{su}(2)`, being,
.. math::
\\begin{align*}
A &= -i(\\omega_x J_x + \\omega_y J_y + \\omega_z J_z),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{2}\\begin{pmatrix}
0 & 1 \\\\
1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{2}\\begin{pmatrix}
0 & -i \\\\
i & 0
\\end{pmatrix},&
J_z &= \\frac{1}{2}\\begin{pmatrix}
1 & 0 \\\\
0 & -1
\\end{pmatrix}
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) =& \\exp\\left(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z\\right)\\\\
=& \\exp\\left(2^{-\\tau}\\left(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z\\right)\\right)^{2^\\tau}\\\\
\\approx& \\biggl(\\exp\\left(-i\\frac12 2^{-\\tau} \\omega_z J_z\\right)\\exp\\left(-i\\left(2^{-\\tau} \\omega_\\phi J_\\phi\\right)\\right)\\exp\\left(-i\\frac12 2^{-\\tau} \\omega_z J_z\\right)\\biggr)^{2^\\tau}\\\\
=& \\begin{pmatrix}
\\cos\\left(\\frac{\\Phi}{2}\\right)e^{-iz} & -i\\sin\\left(\\frac{\\Phi}{2}\\right) e^{i\\phi}\\\\
-i\\sin\\left(\\frac{\\Phi}{2}\\right) e^{-i\\phi} & \\cos\\left(\\frac{\\Phi}{2}\\right)e^{iz}
\\end{pmatrix}^{2^\\tau}\\\\
=& T^{2^\\tau}.
\\end{align*}
Here :math:`z = 2^{-\\tau}\\frac{\\omega_z}{2}`, :math:`\\Phi = 2^{-\\tau}\\sqrt{\\omega_x^2 + \\omega_y^2}`, and :math:`\\phi = \\mathrm{atan}2(\\omega_y, \\omega_x)`.
**For spin-one systems**
Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\mathfrak{su}(3)`, being,
.. math::
\\begin{align*}
A &= -i(\\omega_x J_x + \\omega_y J_y + \\omega_z J_z + \\omega_q Q),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & 1 & 0 \\\\
1 & 0 & 1 \\\\
0 & 1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & -i & 0 \\\\
i & 0 & -i \\\\
0 & i & 0
\\end{pmatrix},\\\\
J_z &= \\begin{pmatrix}
1 & 0 & 0 \\\\
0 & 0 & 0 \\\\
0 & 0 & -1
\\end{pmatrix},&
Q &= \\frac{1}{3}\\begin{pmatrix}
1 & 0 & 0 \\\\
0 & -2 & 0 \\\\
0 & 0 & 1
\\end{pmatrix}
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) =& \\exp\\left(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z - i\\omega_q Q\\right)\\\\
=& \\exp\\left(2^{-\\tau}\\left(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z - i\\omega_q Q\\right)\\right)^{2^\\tau}\\\\
\\approx& \\biggl(\\exp\\left(-i\\frac12\\left(2^{-\\tau} \\omega_z J_z + 2^{-\\tau}\\omega_q Q\\right)\\right)\\nonumber\\\\
&\\cdot\\exp\\left(-i\\left(2^{-\\tau} \\omega_\\phi J_\\phi\\right)\\right)\\nonumber\\\\
&\\cdot\\exp\\left(-i\\frac12\\left(2^{-\\tau} \\omega_z J_z + 2^{-\\tau} \\omega_q Q\\right)\\right)\\biggr)^{2^\\tau}\\\\
=& \\begin{pmatrix}
\\left(\\cos\\left(\\frac{\\Phi}{2}\\right) e^{-iz}e^{-iq}\\right)^2 & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{iq}e^{-iz}e^{-i\\phi} & -\\left(\\sin\\left(\\frac{\\Phi}{2}\\right)e^{iq}e^{-i\\phi}\\right)^2\\\\
\\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{iq}e^{-iz}e^{i\\phi} & \\cos(\\Phi)e^{i4q} & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{iq}e^{iz}e^{-i\\phi}\\\\
-\\left(\\sin\\left(\\frac{\\Phi}{2}\\right)e^{-iq}e^{i\\phi}\\right)^2 & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{iq}e^{iz}e^{i\\phi} & \\left(\\cos\\left(\\frac{\\Phi}{2}\\right) e^{iz}e^{-iq}\\right)^2
\\end{pmatrix}^{2^\\tau}.\\\\
\\end{align*}
Here :math:`z = 2^{-\\tau}\\frac{\\omega_z}{2}`, :math:`q = 2^{-\\tau}\\frac{\\omega_q}{6}`, :math:`\\Phi = 2^{-\\tau}\\sqrt{\\omega_x^2 + \\omega_y^2}`, and :math:`\\phi = \\mathrm{atan}2(\\omega_y, \\omega_x)`.
Once :math:`T` is calculated, it is then recursively squared :math:`\\tau` times to obtain :math:`\\exp(A)`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z (and q for spin-one) respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
* **number_of_squares** (:obj:`int`) - The number of squares to make to the approximate matrix (:math:`\\tau` above).
matrix_exponential_lie_trotter_8(field_sample, result) : :obj:`callable`
Calculates a matrix exponential based on the Lie Product Formula,
.. math::
\\exp(A + B) = \\lim_{c \\to \\infty} \\left(\\exp\\left(\\frac{1}{c}A\\right) \\exp\\left(\\frac{1}{c}B\\right)\\right)^c.
.. warning::
Only available for use with spin-one systems.
Will not work with spin-half systems.
Assumes the exponent is an imaginary linear combination elements of :math:`\\mathfrak{su}(3)`, being,
.. math::
\\begin{align*}
A &= -i(\\omega_x J_x + \\omega_y J_y + \\omega_z J_z + \\omega_q Q + \\omega_{u1} U_1 + \\omega_{u2} U_2 + \\omega_{v1} V_1 + \\omega_{v2} V_2),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & 1 & 0 \\\\
1 & 0 & 1 \\\\
0 & 1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & -i & 0 \\\\
i & 0 & -i \\\\
0 & i & 0
\\end{pmatrix},\\\\
J_z &= \\begin{pmatrix}
1 & 0 & 0 \\\\
0 & 0 & 0 \\\\
0 & 0 & -1
\\end{pmatrix},&
Q &= \\frac{1}{3}\\begin{pmatrix}
1 & 0 & 0 \\\\
0 & -2 & 0 \\\\
0 & 0 & 1
\\end{pmatrix},\\\\
U_1 &= \\begin{pmatrix}
0 & 0 & 1 \\\\
0 & 0 & 0 \\\\
1 & 0 & 0
\\end{pmatrix},&
U_2 &= \\begin{pmatrix}
0 & 0 & -i \\\\
0 & 0 & 0 \\\\
i & 0 & 0
\\end{pmatrix},\\\\
V_1 &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & 1 & 0 \\\\
1 & 0 & -1 \\\\
0 & -1 & 0
\\end{pmatrix},&
V_2 &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & -i & 0 \\\\
i & 0 & i \\\\
0 & -i & 0
\\end{pmatrix}.\\\\
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) =& \\exp\\biggl(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z - i\\omega_q Q\\\\
&- i\\omega_{u1} U_1 - i\\omega_{u2} U_2 - i\\omega_{v1} V_1 - i\\omega_{v2} V_2\\biggr)\\\\
& \\exp\\biggl(2^{-\\tau}\\biggl(-i\\omega_x J_x - i\\omega_y J_y - i\\omega_z J_z - i\\omega_q Q\\\\
&- i\\omega_{u1} U_1 - i\\omega_{u2} U_2 - i\\omega_{v1} V_1 - i\\omega_{v2} V_2\\biggr)\\biggr)^{2^\\tau}\\\\
\\approx& \\biggl(\\exp\\left(-i2^{-\\tau} \\omega_\\phi J_\\phi\\right)\\exp\\left(-i2^{-\\tau} \\omega_{u\\phi} U_{u\\phi}\\right)\\\\
&\\cdot\\exp\\left(-i2^{-\\tau} \\omega_{v\\phi} V_{v\\phi}\\right)\\exp\\left(-i2^{-\\tau} \\omega_z J_z -i2^{-\\tau} \\omega_q Q \\right)\\biggr)^{2^\\tau}\\\\
=& \\biggl(\\begin{pmatrix}
\\cos^2\\left(\\frac{\\Phi}{2}\\right) & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{-i\\phi} & -\\left(\\sin\\left(\\frac{\\Phi}{2}\\right)e^{-i\\phi}\\right)^2\\\\
\\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{i\\phi} & \\cos\\left(\\Phi\\right) & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{-i\\phi}\\\\
-\\left(\\sin\\left(\\frac{\\Phi}{2}\\right)e^{i\\phi}\\right)^2 & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi)e^{i\\phi} & \\cos^2\\left(\\frac{\\Phi}{2}\\right)
\\end{pmatrix}\\\\
&\\cdot \\begin{pmatrix}
\\cos\\left(\\Phi_u\\right) & 0 & -i \\sin\\left(\\Phi_u\\right)e^{-i\\phi_u}\\\\
0 & 1 & 0\\\\
-i \\sin\\left(\\Phi_u\\right)e^{i\\phi_u} & 0 & \\cos\\left(\\Phi_u\\right)
\\end{pmatrix}\\\\
&\\cdot \\begin{pmatrix}
\\cos^2\\left(\\frac{\\Phi_v}{2}\\right) & \\frac{-i}{\\sqrt{2}} \\sin(\\Phi_v)e^{-i\\phi_v} & \\left(\\sin\\left(\\frac{\\Phi_v}{2}\\right)e^{-i\\phi_v}\\right)^2\\\\
\\frac{-i}{\\sqrt{2}} \\sin(\\Phi_v)e^{i\\phi_v} & \\cos\\left(\\Phi_v\\right) & \\frac{i}{\\sqrt{2}} \\sin(\\Phi_v)e^{-i\\phi_v}\\\\
\\left(\\sin\\left(\\frac{\\Phi_v}{2}\\right)e^{i\\phi_v}\\right)^2 & \\frac{i}{\\sqrt{2}} \\sin(\\Phi_v)e^{i\\phi_v} & \\cos^2\\left(\\frac{\\Phi_v}{2}\\right)
\\end{pmatrix}\\\\
&\\cdot \\begin{pmatrix}
e^{-iz - iq} & 0 & 0\\\\
0 & e^{i2q} & 0\\\\
0 & 0 & e^{iz - iq}
\\end{pmatrix}\\biggr)^{2^\\tau}\\\\
=& T^{2^\\tau}.
\\end{align*}
Here :math:`z = 2^{-\\tau}\\frac{\\omega_z}{2}`, :math:`q = 2^{-\\tau}\\frac{\\omega_q}{6}`, :math:`\\Phi = 2^{-\\tau}\\sqrt{\\omega_x^2 + \\omega_y^2}`, :math:`\\phi = \\mathrm{atan}2(\\omega_y, \\omega_x)`, :math:`\\Phi_u = 2^{-\\tau}\\sqrt{\\omega_{u1}^2 + \\omega_{u2}^2}`, :math:`\\phi_u = \\mathrm{atan}2(\\omega_{u1}, \\omega_{u2})`, :math:`\\Phi_v = 2^{-\\tau}\\sqrt{\\omega_{v1}^2 + \\omega_{v2}^2}`, and :math:`\\phi_v = \\mathrm{atan}2(\\omega_{v1}, \\omega_{v2})`.
Once :math:`T` is calculated, it is then recursively squared :math:`\\tau` times to obtain :math:`\\exp(A)`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y, z, q, u1, u2, v1 and v2 respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
* **number_of_squares** (:obj:`int`) - The number of squares to make to the approximate matrix (:math:`\\tau` above).
"""
def __init__(self, spin_quantum_number:SpinQuantumNumber, device:Device, threads_per_block:int, number_of_squares:int):
"""
Parameters
----------
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin-half :obj:`SpinQuantumNumber.HALF`, or spin-one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration.
That is, whether the integrator is compiled for a CPU or GPU.
Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise.
See :obj:`Device` for all options and more details.
threads_per_block : :obj:`int`
The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`).
Defaults to 64.
Modifying might be able to increase execution time for different GPU models.
"""
jit_device = device.jit_device
device_index = device.index
number_of_hypercubes = math.ceil(number_of_squares/2)
if number_of_hypercubes < 0:
number_of_hypercubes = 0
trotter_precision = 4**number_of_hypercubes
@jit_device
def conj(z):
    # Manual complex conjugate (negate the imaginary part), kept as a device
    # function so it can be called from compiled kernels.
    real_part = z.real
    imaginary_part = z.imag
    return real_part - 1j*imaginary_part
@jit_device
def expm1i(i):
    # Evaluates exp(1j*i) - 1 for real i.
    # The real part uses cos(i) - 1 = -2 sin^2(i/2), which avoids the
    # catastrophic cancellation of computing the exponential and subtracting 1.
    half_angle_sine = math.sin(i/2)
    return -2*(half_angle_sine**2) + 1j*math.sin(i)
@jit_device
def cos_exp_m1(c, e):
    # Evaluates cos(c)*exp(1j*e) - 1 via the product-to-sum identity
    # cos(c) exp(1j e) = (exp(1j(e + c)) + exp(1j(e - c)))/2,
    # staying in the cancellation-free expm1i form throughout.
    plus_term = expm1i(c + e)
    minus_term = expm1i(-c + e)
    return (plus_term + minus_term)/2
@jit_device
def cos_m1(t):
    # Evaluates cos(t) - 1 = -2 sin^2(t/2), free of cancellation near t = 0.
    half_angle_sine = math.sin(t/2)
    return -2*(half_angle_sine**2)
if spin_quantum_number == SpinQuantumNumber.HALF:
@jit_device
def set_to(operator, result):
    # Element-wise copy of a 2x2 matrix into result.
    for row in range(2):
        for column in range(2):
            result[row, column] = operator[row, column]
@jit_device
def set_to_one(operator):
    # Overwrite the 2x2 matrix with the multiplicative identity.
    for row in range(2):
        for column in range(2):
            if row == column:
                operator[row, column] = 1
            else:
                operator[row, column] = 0
@jit_device
def set_to_zero(operator):
    # Overwrite the 2x2 matrix with the zero matrix.
    for row in range(2):
        for column in range(2):
            operator[row, column] = 0
@jit_device
def matrix_multiply(left, right, result):
    # result = left @ right for 2x2 complex matrices.
    # result must not alias either input, since inputs are read as it is written.
    for row in range(2):
        for column in range(2):
            result[row, column] = left[row, 0]*right[0, column] + left[row, 1]*right[1, column]
@jit_device
def matrix_square_m1(operator, result):
    # Squares a 2x2 matrix held in residual form: for A = 1 + a, computes
    # s = A**2 - 1 = (2 + a)a directly from the residual a, so no precision is
    # lost to the implicit identity component.
    # operator: residual a (read). result: residual s (written); must not
    # alias operator. The exact expression forms below are deliberate — do not
    # reassociate them, the rounding behaviour is the point of this function.
    result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0]
    result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0]
    result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1]
    result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1]
@jit_device
def matrix_multiply_m1(left, right, result):
    # Multiplies 2x2 matrices held in residual form: for L = 1 + l and
    # R = 1 + r, computes o = LR - 1 = l + r + lr directly from the residuals,
    # avoiding the cancellation of forming LR and then subtracting the identity.
    # left, right: residuals l, r (read). result: residual o (written); must
    # not alias either input. Keep the grouping as written — it is precision
    # motivated.
    result[0, 0] = (left[0, 0] + right[0, 0]) + (left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0])
    result[1, 0] = (left[1, 0] + right[1, 0]) + (left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0])
    result[0, 1] = (left[0, 1] + right[0, 1]) + (left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1])
    result[1, 1] = (left[1, 1] + right[1, 1]) + (left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1])
@jit_device
def matrix_exponential_analytic(field_sample, result):
    # Closed-form su(2) exponential exp(-i(wx Jx + wy Jy + wz Jz)) for a
    # spin-half system, written into the 2x2 matrix result.
    omega_x = field_sample[0]
    omega_y = field_sample[1]
    omega_z = field_sample[2]
    omega_r = math.sqrt(omega_x**2 + omega_y**2 + omega_z**2)
    if omega_r > 0:
        # Reduce to the unit rotation axis.
        omega_x /= omega_r
        omega_y /= omega_r
        omega_z /= omega_r
        # cos(omega_r/2) - 1 in cancellation-free form, plus the half-angle sine.
        cosine_m1 = cos_exp_m1(omega_r/2, 0)
        sine = math.sin(omega_r/2)
        result[0, 0] = cosine_m1 - 1j*omega_z*sine + 1
        result[1, 0] = (omega_y - 1j*omega_x)*sine
        result[0, 1] = -(omega_y + 1j*omega_x)*sine
        result[1, 1] = cosine_m1 + 1j*omega_z*sine + 1
    else:
        # Zero field: the exponential is the identity.
        result[0, 0] = 1
        result[1, 0] = 0
        result[0, 1] = 0
        result[1, 1] = 1
@jit_device
def matrix_exponential_lie_trotter(field_sample, result):
    # Symmetric (second order) Trotter step for the spin-half exponential
    # exp(-i(wx Jx + wy Jy + wz Jz)), built in residual (T - 1) form and then
    # repeatedly squared back up to the full step.
    a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
    if a > 0:
        # Unit transverse phase factor exp(1j*phi), phi = atan2(wy, wx).
        ep = (field_sample[0] + 1j*field_sample[1])/a
    else:
        ep = 1
    # Scale the step down so that the 2*number_of_hypercubes squarings below
    # (a factor of trotter_precision = 4**number_of_hypercubes) restore it.
    a = a/trotter_precision
    Sa = -1j*math.sin(a/2)
    z = field_sample[2]/(2*trotter_precision)
    # Residual of the single Trotter step, T - 1, using cancellation-free forms.
    result[0, 0] = cos_exp_m1(a/2, -z)
    result[1, 0] = Sa*ep
    result[0, 1] = Sa/ep
    result[1, 1] = cos_exp_m1(a/2, z)
    # Scratch storage appropriate to the execution target.
    if device_index == 0:
        temporary = np.empty((2, 2), dtype = np.complex128)
    elif device_index == 1:
        temporary = cuda.local.array((2, 2), dtype = np.complex128)
    elif device_index == 2:
        temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)
        temporary = temporary_group[roc.get_local_id(1), :, :]
    # Each pass squares twice, staying in residual form throughout.
    for power_index in range(number_of_hypercubes):
        matrix_square_m1(result, temporary)
        matrix_square_m1(temporary, result)
    # Leave residual form: result = T**trotter_precision.
    result[0, 0] += 1
    result[1, 1] += 1
@jit_device
def matrix_exponential_lie_trotter_8(field_sample, result):
    # The 8-generator exponentiator only applies to spin-one systems; this
    # spin-half stub exists so the name is defined for either branch.
    # Fix: the @jit_device decorator was missing here, unlike every sibling
    # device function (including the decorated spin-one stub of
    # matrix_exponential_analytic). Without it, any reference from compiled
    # device code would fail to resolve this as a device function.
    pass
else:
@jit_device
def set_to(operator, result):
    # Element-wise copy of a 3x3 matrix into result.
    for row in range(3):
        for column in range(3):
            result[row, column] = operator[row, column]
@jit_device
def set_to_one(operator):
    # Overwrite the 3x3 matrix with the multiplicative identity.
    for row in range(3):
        for column in range(3):
            if row == column:
                operator[row, column] = 1
            else:
                operator[row, column] = 0
@jit_device
def set_to_zero(operator):
    # Overwrite the 3x3 matrix with the zero matrix.
    for row in range(3):
        for column in range(3):
            operator[row, column] = 0
@jit_device
def matrix_multiply(left, right, result):
    # result = left @ right for 3x3 complex matrices.
    # result must not alias either input, since inputs are read as it is written.
    for row in range(3):
        for column in range(3):
            result[row, column] = left[row, 0]*right[0, column] + left[row, 1]*right[1, column] + left[row, 2]*right[2, column]
@jit_device
def matrix_square_m1(operator, result):
    # Squares a 3x3 matrix held in residual form: for A = 1 + a, computes
    # s = A**2 - 1 = (2 + a)a directly from the residual a, so no precision is
    # lost to the implicit identity component.
    # operator: residual a (read). result: residual s (written); must not
    # alias operator. The exact expression forms below are deliberate — do not
    # reassociate them, the rounding behaviour is the point of this function.
    result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0]
    result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0]
    result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0]
    result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1]
    result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1]
    result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1]
    result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2]
    result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2]
    result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2]
@jit_device
def matrix_multiply_m1(left, right, result):
    # Multiplies 3x3 matrices held in residual form: for L = 1 + l and
    # R = 1 + r, computes o = LR - 1 = l + r + lr directly from the residuals,
    # avoiding the cancellation of forming LR and then subtracting the identity.
    # left, right: residuals l, r (read). result: residual o (written); must
    # not alias either input. Keep the grouping as written — it is precision
    # motivated.
    result[0, 0] = (left[0, 0] + right[0, 0]) + (left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0])
    result[1, 0] = (left[1, 0] + right[1, 0]) + (left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0])
    result[2, 0] = (left[2, 0] + right[2, 0]) + (left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0])
    result[0, 1] = (left[0, 1] + right[0, 1]) + (left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1])
    result[1, 1] = (left[1, 1] + right[1, 1]) + (left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1])
    result[2, 1] = (left[2, 1] + right[2, 1]) + (left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1])
    result[0, 2] = (left[0, 2] + right[0, 2]) + (left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2])
    result[1, 2] = (left[1, 2] + right[1, 2]) + (left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2])
    result[2, 2] = (left[2, 2] + right[2, 2]) + (left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2])
@jit_device
def matrix_exponential_analytic(field_sample, result, number_of_squares):
    # Analytic exponentiation is only implemented for spin-half systems; this
    # spin-one stub exists so the name is defined for either branch.
    # NOTE(review): the signature differs from the spin-half version, which
    # takes only (field_sample, result) — presumably this stub is never
    # called; confirm before unifying the signatures.
    pass
@jit_device
def matrix_exponential_lie_trotter(field_sample, result):
    # Symmetric Trotter step for the spin-one exponential
    # exp(-i(wx Jx + wy Jy + wz Jz + wq Q)), built in residual (T - 1) form
    # and then repeatedly squared back up to the full step.
    a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
    if a > 0:
        # Transverse phase phi = atan2(wy, wx).
        p = math.atan2(field_sample[1], field_sample[0])
    else:
        p = 0
    # Scale the step down so that the 2*number_of_hypercubes squarings below
    # (a factor of trotter_precision = 4**number_of_hypercubes) restore it.
    a = a/trotter_precision
    Sa = math.sin(a/2)
    sa = -1j*math.sin(a)/sqrt2
    z = field_sample[2]/(2*trotter_precision)
    q = field_sample[3]/(6*trotter_precision)
    # Diagonal corners in residual form: c*(c + 2) = (1 + c)**2 - 1.
    save_cos_exp_m1 = cos_exp_m1(a/2, -z - q)
    result[0, 0] = save_cos_exp_m1*(save_cos_exp_m1 + 2)
    result[1, 0] = sa*cmath.exp(1j*(q + p - z))
    result[2, 0] = -(Sa**2)*cmath.exp(2*1j*(p - q))
    result[0, 1] = sa*cmath.exp(1j*(q - p - z))
    result[1, 1] = cos_exp_m1(a, 4*q)
    result[2, 1] = sa*cmath.exp(1j*(q + p + z))
    result[0, 2] = -(Sa**2)*cmath.exp(2*1j*(q - p))
    result[1, 2] = sa*cmath.exp(1j*(q - p + z))
    save_cos_exp_m1 = cos_exp_m1(a/2, z - q)
    result[2, 2] = save_cos_exp_m1*(save_cos_exp_m1 + 2)
    # Scratch storage appropriate to the execution target.
    if device_index == 0:
        temporary = np.empty((3, 3), dtype = np.complex128)
    elif device_index == 1:
        temporary = cuda.local.array((3, 3), dtype = np.complex128)
    elif device_index == 2:
        temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
        temporary = temporary_group[roc.get_local_id(1), :, :]
    # Each pass squares twice, staying in residual form throughout.
    for power_index in range(number_of_hypercubes):
        matrix_square_m1(result, temporary)
        matrix_square_m1(temporary, result)
    # Leave residual form: result = T**trotter_precision.
    result[0, 0] += 1
    result[1, 1] += 1
    result[2, 2] += 1
@jit_device
def matrix_exponential_lie_trotter_8(field_sample, result):
if device_index == 0:
temporary_1 = np.empty((3, 3), dtype = np.complex128)
temporary_2 = np.empty((3, 3), dtype = np.complex128)
elif device_index == 1:
temporary_1 = cuda.local.array((3, 3), dtype = np.complex128)
temporary_2 = cuda.local.array((3, 3), dtype = np.complex128)
elif device_index == 2:
temporary_group_1 = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
temporary_group_2 = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
temporary_1 = temporary_group_1[roc.get_local_id(1), :, :]
temporary_2 = temporary_group_2[roc.get_local_id(1), :, :]
a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
if a > 0:
ep = (field_sample[0] + 1j*field_sample[1])/a
a = a/trotter_precision
Sa = math.sin(a/2)
sa = -1j*math.sin(a)/sqrt2
Cam1 = cos_m1(a/2)
else:
ep = 1
Sa = 0
sa = 0
Cam1 = 0
result[0, 0] = Cam1*(Cam1 + 2)
result[1, 0] = sa*ep
result[2, 0] = -(Sa*ep)**2
result[0, 1] = sa*conj(ep)
result[1, 1] = cos_m1(a)
result[2, 1] = sa*ep
result[0, 2] = -(Sa*conj(ep))**2
result[1, 2] = sa*conj(ep)
result[2, 2] = Cam1*(Cam1 + 2)
a = math.sqrt(field_sample[6]*field_sample[6] + field_sample[7]*field_sample[7])
if a > 0:
ep = (field_sample[6] + 1j*field_sample[7])/a
a = a/trotter_precision
Sa = math.sin(a/2)
sa = -1j*math.sin(a)/sqrt2
Cam1 = cos_m1(a/2)
else:
ep = 1
Sa = 0
sa = 0
Cam1 = 0
temporary_1[0, 0] = Cam1*(Cam1 + 2)
temporary_1[1, 0] = sa*ep
temporary_1[2, 0] = -(Sa*ep)**2
temporary_1[0, 1] = sa*conj(ep)
temporary_1[1, 1] = cos_m1(a)
temporary_1[2, 1] = -sa*ep
temporary_1[0, 2] = -(Sa*conj(ep))**2
temporary_1[1, 2] = -sa*conj(ep)
temporary_1[2, 2] = Cam1*(Cam1 + 2)
matrix_multiply_m1(result, temporary_1, temporary_2)
a = math.sqrt(field_sample[4]*field_sample[4] + field_sample[5]*field_sample[5])
if a > 0:
ep = (field_sample[4] + 1j*field_sample[5])/a
a = a/trotter_precision
Sa = math.sin(a)
Cam1 = cos_m1(a)
else:
ep = 1
ep = 1
Sa = 0
sa = 0
Cam1 = 0
result[0, 0] = Cam1
result[1, 0] = 0.0
result[2, 0] = -(Sa*ep)**2
result[0, 1] = sa*conj(ep)
result[1, 1] = 0.0
result[2, 1] = -sa*ep
result[0, 2] = -(Sa*conj(ep))**2
result[1, 2] = 0.0
result[2, 2] = Cam1
matrix_multiply_m1(result, temporary_2, temporary_1)
a = field_sample[2]/trotter_precision
ep = field_sample[3]/(3*trotter_precision)
temporary_2[0, 0] = expm1i(-a - ep)
temporary_2[1, 0] = 0.0
temporary_2[2, 0] = 0.0
temporary_2[0, 1] = 0.0
temporary_2[1, 1] = expm1i(2*ep)
temporary_2[2, 1] = 0.0
temporary_2[0, 2] = 0.0
temporary_2[1, 2] = 0.0
temporary_2[2, 2] = expm1i(a - ep)
matrix_multiply_m1(temporary_1, temporary_2, result)
for power_index in range(number_of_hypercubes):
matrix_square_m1(result, temporary_1)
matrix_square_m1(temporary_1, result)
result[0, 0] += 1
result[1, 1] += 1
result[2, 2] += 1
self.conj = conj
self.expm1i = expm1i
self.cos_exp_m1 = cos_exp_m1
self.cos_m1 = cos_m1
self.set_to = set_to
self.set_to_one = set_to_one
self.set_to_zero = set_to_zero
self.matrix_multiply = matrix_multiply
self.matrix_multiply_m1 = matrix_multiply_m1
self.matrix_exponential_analytic = matrix_exponential_analytic
self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter
self.matrix_exponential_lie_trotter_8 = matrix_exponential_lie_trotter_8
self.matrix_square_m1 = matrix_square_m1
| 53.361394
| 516
| 0.581708
| 13,061
| 99,519
| 4.259322
| 0.052446
| 0.032626
| 0.019809
| 0.009563
| 0.822114
| 0.778145
| 0.733979
| 0.697219
| 0.678812
| 0.654473
| 0
| 0.036116
| 0.291643
| 99,519
| 1,865
| 517
| 53.361394
| 0.753032
| 0.421136
| 0
| 0.44623
| 0
| 0.00618
| 0.034302
| 0.003798
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088999
| false
| 0.002472
| 0.008653
| 0.024722
| 0.165637
| 0.00618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c3e7044b9a8d6bc7e21256b2b463446aa536d23e
| 356
|
py
|
Python
|
allegation/tests/utils/filter_tags_test_mixin.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 16
|
2016-05-20T09:03:32.000Z
|
2020-09-13T14:23:06.000Z
|
allegation/tests/utils/filter_tags_test_mixin.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 2
|
2016-05-24T01:44:14.000Z
|
2016-06-17T22:19:45.000Z
|
allegation/tests/utils/filter_tags_test_mixin.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 2
|
2016-10-10T16:14:19.000Z
|
2020-10-26T00:17:02.000Z
|
class FilterTagsTestMixin(object):
def assert_have_filter_tags(self, category, value):
filter_tags = self.find('#filter-tags').text.lower()
filter_tags.should.contain(category.lower())
filter_tags.should.contain(str(value).lower())
def assert_no_filter_tags(self):
self.find_all('.filter').should.have.length_of(0)
| 39.555556
| 60
| 0.705056
| 47
| 356
| 5.106383
| 0.468085
| 0.25
| 0.175
| 0.175
| 0.233333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003322
| 0.154494
| 356
| 8
| 61
| 44.5
| 0.79402
| 0
| 0
| 0
| 0
| 0
| 0.053371
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
613658a10de0abe34b68df0e796ca735776a6a36
| 198
|
py
|
Python
|
util/plt_setting.py
|
mnm-analytics/nklearn
|
333b1cadd49b63bdbdbd121b814ade03d19ada49
|
[
"MIT"
] | null | null | null |
util/plt_setting.py
|
mnm-analytics/nklearn
|
333b1cadd49b63bdbdbd121b814ade03d19ada49
|
[
"MIT"
] | null | null | null |
util/plt_setting.py
|
mnm-analytics/nklearn
|
333b1cadd49b63bdbdbd121b814ade03d19ada49
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import japanize_matplotlib
plt.style.use("ggplot")
plt.rcParams["figure.figsize"] = (16,10)
plt.rcParams["font.fontsize"] = 12
| 24.75
| 40
| 0.772727
| 31
| 198
| 4.903226
| 0.677419
| 0.144737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.106061
| 198
| 7
| 41
| 28.285714
| 0.824859
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
615876c3c8ddef74095cbcb8ab41e04c858d2edd
| 161
|
py
|
Python
|
pygdpr/specifications/dpa_node_type_specification/__init__.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | null | null | null |
pygdpr/specifications/dpa_node_type_specification/__init__.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | 2
|
2022-02-19T06:56:03.000Z
|
2022-02-19T07:00:00.000Z
|
pygdpr/specifications/dpa_node_type_specification/__init__.py
|
GDPRxiv/crawler
|
178ef9ff6c3641ba8b761a49e42c2579e453c1ca
|
[
"MIT"
] | null | null | null |
class DPANodeTypeSpecification:
def is_satisfied_by(self, dpa_node):
classname = dpa_node.__class__.__name__
return (classname == 'DPANode')
| 32.2
| 47
| 0.714286
| 17
| 161
| 6.058824
| 0.764706
| 0.135922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198758
| 161
| 4
| 48
| 40.25
| 0.79845
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
6159f8ff5e82e8b94750316d4fdfa0da3b6c400d
| 80
|
py
|
Python
|
apps/node/src/app/main/model_centric/processes/__init__.py
|
hivecell-io/federated_learning
|
e251bfa65c32abd83359c2b6847b9d0b62c4f5c3
|
[
"Apache-2.0"
] | 7
|
2020-04-20T22:22:08.000Z
|
2020-07-25T17:32:08.000Z
|
apps/node/src/app/main/model_centric/processes/__init__.py
|
hivecell-io/federated_learning
|
e251bfa65c32abd83359c2b6847b9d0b62c4f5c3
|
[
"Apache-2.0"
] | 3
|
2020-04-24T21:20:57.000Z
|
2020-05-28T09:17:02.000Z
|
apps/node/src/app/main/model_centric/processes/__init__.py
|
hivecell-io/federated_learning
|
e251bfa65c32abd83359c2b6847b9d0b62c4f5c3
|
[
"Apache-2.0"
] | 4
|
2020-04-24T22:32:37.000Z
|
2020-05-25T19:29:20.000Z
|
from .process_manager import ProcessManager
process_manager = ProcessManager()
| 20
| 43
| 0.85
| 8
| 80
| 8.25
| 0.625
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 80
| 3
| 44
| 26.666667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
618357e099de41e86313f27afebc444ef9b76033
| 1,141
|
py
|
Python
|
model/utils.py
|
PetrovSergSerg/python_lection7
|
3214e89f8413442c6ffe07bdc20b842209ff6253
|
[
"Apache-2.0"
] | null | null | null |
model/utils.py
|
PetrovSergSerg/python_lection7
|
3214e89f8413442c6ffe07bdc20b842209ff6253
|
[
"Apache-2.0"
] | null | null | null |
model/utils.py
|
PetrovSergSerg/python_lection7
|
3214e89f8413442c6ffe07bdc20b842209ff6253
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from random import randint, choice, randrange
import re
import data.constants as c
def get_random_date(start, end):
"""Generate a random datetime between `start` and `end`"""
return start + datetime.timedelta(
# Get a random amount of seconds between `start` and `end`
seconds=randint(0, int((end - start).total_seconds())),
)
def get_random_word(alphabet: str, length: int):
"""Generate a random word on alphabet with given length"""
return ''.join([choice(alphabet) for i in range(length)])
def get_random_email(alphabet: str):
"""Generate random email using function get_random_word(alphabet, length)"""
return get_random_word(alphabet, randint(3, 10)) + '@' + get_random_word(alphabet, randint(2, 10)) + '.ru'
def random_string(prefix: str, max_length: int):
return prefix+"".join([choice(c.SYMBOLS) for x in range(randrange(max_length))])
def clear(string):
return re.sub("[() -/]", "", string)
def xstr(string):
if string is None:
return ""
return str(string)
def remove_spaces(string):
return ' '.join(xstr(string).strip().split())
| 27.829268
| 110
| 0.680105
| 158
| 1,141
| 4.803797
| 0.398734
| 0.071146
| 0.068511
| 0.110672
| 0.073781
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007487
| 0.180543
| 1,141
| 40
| 111
| 28.525
| 0.804278
| 0.205083
| 0
| 0
| 1
| 0
| 0.013468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.318182
| false
| 0
| 0.181818
| 0.136364
| 0.863636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
61bbc3af714e2181d128dd2b7228751ee1012c63
| 1,915
|
py
|
Python
|
app/generator/view.py
|
LifeLaboratory/Laboratory_EnergyHack
|
716f5a16b5fd0338fdf895882260b9b284e601fb
|
[
"MIT"
] | null | null | null |
app/generator/view.py
|
LifeLaboratory/Laboratory_EnergyHack
|
716f5a16b5fd0338fdf895882260b9b284e601fb
|
[
"MIT"
] | null | null | null |
app/generator/view.py
|
LifeLaboratory/Laboratory_EnergyHack
|
716f5a16b5fd0338fdf895882260b9b284e601fb
|
[
"MIT"
] | null | null | null |
from json import loads
from app.generator.creator import GenerateCode
def get_code(struct):
"""
Публичный метод для построения кода по правилам
:param struct:
:return:
"""
return GenerateCode().create_code(struct)
def get_file(struct):
"""
Публичный метод для построения кода по правилам и формирования exe
:param struct:
:return:
"""
return GenerateCode().create_file(struct)
data = loads('''
[
{"name": "open", "action": "open_file", "file_path": "C:\\tmp"},
{"name": "cycle", "action":
[
{"name": "write", "action": "write", "filter": "flt", "code": "code"}
]
},
{"name": "close", "action": "close_file"}
]
''')
data = loads('''
[
{"name": "open", "action": "open_file", "file_path": "B:/Program Files (x86)/AbilityCash/AbilityCash.exe"},
{"name": "open", "action": "open_file", "file_path": "B:/Program Files (x86)/AbilityCash/AbilityCash.xls"},
{"name": "Condition", "action": "click", "object": "AC-E"},
{"name": "cycle", "action": "cycle", "index": 10, "for":
[
{"name": "Condition", "action": "click", "object": "AC-E"},
{"name": "Condition", "action": "save_value", "object": "AC-E", "value": "123"},
{"name": "Condition", "action": "save_value", "object": "AC-E", "source": "A"},
{"name": "cycle", "action": "cycle", "index": 10, "for":
[
{"name": "Condition", "action": "click", "object": "AC-E"},
{"name": "Condition", "action": "save_value", "object": "AC-E", "value": "123"},
{"name": "Condition", "action": "save_value", "object": "AC-E", "source": "B"}
]
}
]
},
{"name": "Condition", "action": "save_value", "object": "AC-E", "value": "123"},
{"name": "Condition", "action": "save_value", "object": "AC-E", "source": "A1"}
]
''')
# print(GenerateCode().create_file(data))
| 31.916667
| 109
| 0.542037
| 208
| 1,915
| 4.903846
| 0.283654
| 0.114706
| 0.167647
| 0.135294
| 0.767647
| 0.767647
| 0.687255
| 0.687255
| 0.562745
| 0.515686
| 0
| 0.01196
| 0.214099
| 1,915
| 59
| 110
| 32.457627
| 0.665781
| 0.106005
| 0
| 0.3
| 1
| 0.225
| 0.840144
| 0.043269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
61be3cf346d2e41699e209d40684c448ba1daab6
| 562
|
py
|
Python
|
misc/gen_yupik_words.py
|
cwtliu/yupik-mt
|
b0c53dc5577a8b66f3bb60da64679bba6ce8bbd9
|
[
"MIT"
] | 7
|
2018-05-14T06:07:36.000Z
|
2021-04-29T02:56:54.000Z
|
misc/gen_yupik_words.py
|
cwtliu/yupik-mt
|
b0c53dc5577a8b66f3bb60da64679bba6ce8bbd9
|
[
"MIT"
] | null | null | null |
misc/gen_yupik_words.py
|
cwtliu/yupik-mt
|
b0c53dc5577a8b66f3bb60da64679bba6ce8bbd9
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import pickle
'''
Appends generated english to english word mappings to two files. This will
provide an additional corpus to train an NN which takes an unaranged set of
english words and translates them to a coherent sentence.
The first set of english words are taken from raw translations of yupik roots,
postbases, and endings from dictionary lookups.
The second set of english words are taken from the provided english translations.
author: kechavez
'''
# Retrieve all root, postbases, endings from pickled format.
| 33.058824
| 81
| 0.80427
| 85
| 562
| 5.258824
| 0.647059
| 0.033557
| 0.080537
| 0.114094
| 0.129754
| 0.129754
| 0.129754
| 0
| 0
| 0
| 0
| 0
| 0.16548
| 562
| 16
| 82
| 35.125
| 0.953092
| 0.103203
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
f60723dfbed875059ef8d85bdd6279e791d24df9
| 285
|
py
|
Python
|
Soma.py
|
MatheusSouza70/Exerc-cios-Python
|
f8878a0c9d62e49db61dcbce0ee10a161e12a894
|
[
"MIT"
] | 1
|
2022-03-14T01:35:09.000Z
|
2022-03-14T01:35:09.000Z
|
Soma.py
|
MatheusSouza70/Exerc-cios-Python
|
f8878a0c9d62e49db61dcbce0ee10a161e12a894
|
[
"MIT"
] | null | null | null |
Soma.py
|
MatheusSouza70/Exerc-cios-Python
|
f8878a0c9d62e49db61dcbce0ee10a161e12a894
|
[
"MIT"
] | null | null | null |
nota1 = float (input("Informe a primeira nota: "))
nota2 = float (input("Informe a segunda nota: "))
nota3 = float (input("Informe a terceira nota: "))
nota4 = float (input("Informe a quarta nota: "))
media = (nota1+nota2+nota3+nota4) / 4
print("A média é: {}" .format(media))
| 35.625
| 51
| 0.652632
| 40
| 285
| 4.65
| 0.475
| 0.215054
| 0.365591
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038298
| 0.175439
| 285
| 8
| 52
| 35.625
| 0.753191
| 0
| 0
| 0
| 0
| 0
| 0.394265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f60e31e4e5fd5cf2e8629566b00cdd6b967d7337
| 36
|
py
|
Python
|
AlgorithmsPractice/python/test.py
|
YangXiaoo/NoteBook
|
37056acad7a05b876832f72ac34d3d1a41e0dd22
|
[
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 58
|
2019-03-03T04:42:23.000Z
|
2022-01-13T04:36:31.000Z
|
AlgorithmsPractice/python/test.py
|
YangXiaoo/NoteBook
|
37056acad7a05b876832f72ac34d3d1a41e0dd22
|
[
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | null | null | null |
AlgorithmsPractice/python/test.py
|
YangXiaoo/NoteBook
|
37056acad7a05b876832f72ac34d3d1a41e0dd22
|
[
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 28
|
2019-08-11T01:25:00.000Z
|
2021-08-22T06:46:06.000Z
|
# coding:utf-8
s = 3
print(s*(1/s))
| 9
| 14
| 0.555556
| 9
| 36
| 2.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.166667
| 36
| 4
| 15
| 9
| 0.566667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
f61a5046f32da237e548fc60980985b0b135b418
| 799
|
py
|
Python
|
toontown/coghq/FactorySpecs.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 8
|
2017-10-10T11:41:01.000Z
|
2021-02-23T12:55:47.000Z
|
toontown/coghq/FactorySpecs.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 1
|
2021-06-08T17:16:48.000Z
|
2021-06-08T17:16:48.000Z
|
toontown/coghq/FactorySpecs.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 3
|
2021-06-03T05:36:36.000Z
|
2021-06-22T15:07:31.000Z
|
from toontown.toonbase import ToontownGlobals
import SellbotLegFactorySpec
import SellbotLegFactoryCogs
import LawbotLegFactorySpec
import LawbotLegFactoryCogs
def getFactorySpecModule(factoryId):
return FactorySpecModules[factoryId]
def getCogSpecModule(factoryId):
return CogSpecModules[factoryId]
FactorySpecModules = {ToontownGlobals.SellbotFactoryInt: SellbotLegFactorySpec,
ToontownGlobals.LawbotOfficeInt: LawbotLegFactorySpec}
CogSpecModules = {ToontownGlobals.SellbotFactoryInt: SellbotLegFactoryCogs,
ToontownGlobals.LawbotOfficeInt: LawbotLegFactoryCogs}
if __dev__:
import FactoryMockupSpec
FactorySpecModules[ToontownGlobals.MockupFactoryId] = FactoryMockupSpec
import FactoryMockupCogs
CogSpecModules[ToontownGlobals.MockupFactoryId] = FactoryMockupCogs
| 33.291667
| 79
| 0.861076
| 53
| 799
| 12.90566
| 0.433962
| 0.04386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093867
| 799
| 23
| 80
| 34.73913
| 0.944751
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.388889
| 0.111111
| 0.611111
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
f63bff37ac5336cecf554327a5b0d31204bd5905
| 122
|
py
|
Python
|
olivertwist/ruleengine/__init__.py
|
octoenergy/oliver-twist
|
7496208d9de4c21cd9e0d553f24bf07612ddc720
|
[
"Apache-2.0"
] | 37
|
2020-12-17T13:32:12.000Z
|
2022-03-16T07:19:56.000Z
|
olivertwist/ruleengine/__init__.py
|
Norina-Sun/oliver-twist
|
5bb9b2cddc097d89d4a3eff78c63036682dd19f8
|
[
"Apache-2.0"
] | 28
|
2020-12-17T16:20:14.000Z
|
2022-01-21T09:00:15.000Z
|
olivertwist/ruleengine/__init__.py
|
octoenergy/oliver-twist
|
7496208d9de4c21cd9e0d553f24bf07612ddc720
|
[
"Apache-2.0"
] | 2
|
2021-08-09T17:07:23.000Z
|
2021-11-05T14:37:18.000Z
|
# -*- coding: utf-8 -*-
"""Document __init__.py here.
Copyright (C) 2020, Auto Trader UK
Created 15. Dec 2020 14:31
"""
| 15.25
| 34
| 0.639344
| 19
| 122
| 3.894737
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.180328
| 122
| 7
| 35
| 17.428571
| 0.59
| 0.918033
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f648045e68d81a9f300839dd2e3bdd8844d024cb
| 318
|
py
|
Python
|
utils/local_config.py
|
afcarl/fg-gating
|
9896447e2f91122ecc5d4153db127cbb30d7b5c9
|
[
"MIT"
] | 1
|
2019-04-22T16:43:23.000Z
|
2019-04-22T16:43:23.000Z
|
utils/local_config.py
|
afcarl/fg-gating
|
9896447e2f91122ecc5d4153db127cbb30d7b5c9
|
[
"MIT"
] | null | null | null |
utils/local_config.py
|
afcarl/fg-gating
|
9896447e2f91122ecc5d4153db127cbb30d7b5c9
|
[
"MIT"
] | null | null | null |
import os
CUR_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
NER_MODEL_PATH = CUR_DIRECTORY + 'english.all.3class.distsim.crf.ser.gz'
NER_JAR_PATH = CUR_DIRECTORY + 'stanford-ner.jar'
POS_MODEL_PATH = CUR_DIRECTORY + 'english-left3words-distsim.tagger'
POS_JAR_PATH = CUR_DIRECTORY + 'stanford-postagger.jar'
| 39.75
| 72
| 0.798742
| 48
| 318
| 4.9375
| 0.479167
| 0.253165
| 0.270042
| 0.177215
| 0.464135
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0.081761
| 318
| 8
| 73
| 39.75
| 0.804795
| 0
| 0
| 0
| 0
| 0
| 0.339623
| 0.289308
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f6536fbf3338d5121f74e348e2a369e8473489a6
| 132
|
py
|
Python
|
allegation/views/landing_view.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 16
|
2016-05-20T09:03:32.000Z
|
2020-09-13T14:23:06.000Z
|
allegation/views/landing_view.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 2
|
2016-05-24T01:44:14.000Z
|
2016-06-17T22:19:45.000Z
|
allegation/views/landing_view.py
|
invinst/CPDB
|
c2d8ae8888b13d956cc1068742f18d45736d4121
|
[
"Apache-2.0"
] | 2
|
2016-10-10T16:14:19.000Z
|
2020-10-26T00:17:02.000Z
|
from django.views.generic.base import TemplateView
class LandingView(TemplateView):
template_name = "allegation/landing.html"
| 22
| 50
| 0.80303
| 15
| 132
| 7
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 132
| 5
| 51
| 26.4
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0.174242
| 0.174242
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
9ca8e3b276479c7eabc9dab17d2fe8f9fa1e7133
| 24,551
|
py
|
Python
|
pureples/experiments/pole_balancing/run_all_pole_balancing.py
|
kevinrpb/pureples
|
c591fefd5b20085f1d0537553631e29733374b16
|
[
"MIT"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
pureples/experiments/pole_balancing/run_all_pole_balancing.py
|
kevinrpb/pureples
|
c591fefd5b20085f1d0537553631e29733374b16
|
[
"MIT"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
pureples/experiments/pole_balancing/run_all_pole_balancing.py
|
kevinrpb/pureples
|
c591fefd5b20085f1d0537553631e29733374b16
|
[
"MIT"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import neat_pole_balancing
import hyperneat_pole_balancing
import es_hyperneat_pole_balancing_small
import es_hyperneat_pole_balancing_medium
import es_hyperneat_pole_balancing_large
import gym
import multiprocessing as multi
from multiprocessing import Manager
# Initialize lists to keep track during run.
manager = Manager()
neat_stats, hyperneat_stats, es_hyperneat_small_stats = manager.list([]), manager.list([]), manager.list([])
es_hyperneat_medium_stats, es_hyperneat_large_stats = manager.list([]), manager.list([])
neat_run_one_fitnesses, hyperneat_run_one_fitnesses, es_hyperneat_small_run_one_fitnesses = [], [], []
es_hyperneat_medium_run_one_fitnesses, es_hyperneat_large_run_one_fitnesses = [], []
neat_run_ten_fitnesses, hyperneat_run_ten_fitnesses, es_hyperneat_small_run_ten_fitnesses = [], [], []
es_hyperneat_medium_run_ten_fitnesses, es_hyperneat_large_run_ten_fitnesses = [], []
neat_run_hundred_fitnesses, hyperneat_run_hundred_fitnesses, es_hyperneat_small_run_hundred_fitnesses = [], [], []
es_hyperneat_medium_run_hundred_fitnesses, es_hyperneat_large_run_hundred_fitnesses = [], []
neat_one_solved, hyperneat_one_solved, es_hyperneat_small_one_solved = 0, 0, 0
es_hyperneat_medium_one_solved, es_hyperneat_large_one_solved = 0, 0
neat_ten_solved, hyperneat_ten_solved, es_hyperneat_small_ten_solved = 0, 0, 0
es_hyperneat_medium_ten_solved, es_hyperneat_large_ten_solved = 0, 0
neat_hundred_solved, hyperneat_hundred_solved, es_hyperneat_small_hundred_solved = 0, 0, 0
es_hyperneat_medium_hundred_solved, es_hyperneat_large_hundred_solved = 0, 0
runs = 16
inputs = range(runs)
gens = 50
fit_threshold = 475
max_fit = 475
env = gym.make("CartPole-v1")
# Run the experiments.
def run(i):
    """Execute one full experiment run (index i) for every algorithm variant.

    Each sub-experiment's run() returns a pair whose second element is the
    statistics object; only that element is kept.  Appending to the shared
    Manager lists makes the results visible across process boundaries.
    """
    print("This is run #" + str(i))
    neat_stats.append(neat_pole_balancing.run(gens, env)[1])
    hyperneat_stats.append(hyperneat_pole_balancing.run(gens, env)[1])
    es_hyperneat_small_stats.append(es_hyperneat_pole_balancing_small.run(gens, env)[1])
    es_hyperneat_medium_stats.append(es_hyperneat_pole_balancing_medium.run(gens, env)[1])
    es_hyperneat_large_stats.append(es_hyperneat_pole_balancing_large.run(gens, env)[1])
# Fan the runs out over all available CPU cores.
# NOTE(review): there is no `if __name__ == "__main__":` guard, so this
# module presumably relies on the "fork" start method (Linux default);
# it would fail under "spawn" (Windows/macOS) — confirm deployment target.
p = multi.Pool(multi.cpu_count())
p.map(run,range(runs))
# Aggregate the per-run statistics for every algorithm variant.
# Each entry in a *_stats list is a (stat_one, stat_ten, stat_hundred)
# triple of statistics objects, one per evaluation mode (1, 10 and 100
# episodes); each exposes best_genome() and most_fit_genomes.
def _aggregate(stats_list):
    """Summarize one algorithm's runs across all evaluation modes.

    Returns a triple of 3-element sequences, indexed by evaluation mode
    (0=one, 1=ten, 2=hundred episodes):
      averages -- per-generation best fitness averaged over all runs,
                  capped at max_fit; runs that finished before `gens`
                  generations count as max_fit for the missing tail.
      run_fits -- best fitness of each run, capped at max_fit.
      solved   -- number of runs whose best fitness reached fit_threshold.
    """
    sums = [[0.0] * gens, [0.0] * gens, [0.0] * gens]
    run_fits = ([], [], [])
    solved = [0, 0, 0]
    for run_stats in stats_list:
        for mode, stat in enumerate(run_stats):
            best = stat.best_genome().fitness
            # Cap the reported fitness at max_fit.  (This also fixes a
            # copy-paste bug in the original code, which appended the
            # one-episode fitness in the ten- and hundred-episode cases.)
            run_fits[mode].append(min(best, max_fit))
            if best >= fit_threshold:
                solved[mode] += 1
            history = stat.most_fit_genomes
            for g in range(gens):
                if g < len(history):
                    sums[mode][g] += min(history[g].fitness, max_fit)
                else:
                    # The run stopped early (solved); treat the missing
                    # generations as having reached the fitness cap.
                    sums[mode][g] += max_fit
    averages = [[total / runs for total in per_gen] for per_gen in sums]
    return averages, run_fits, solved

# NEAT.
((neat_one_average_fit, neat_ten_average_fit, neat_hundred_average_fit),
 (neat_run_one_fitnesses, neat_run_ten_fitnesses, neat_run_hundred_fitnesses),
 (neat_one_solved, neat_ten_solved, neat_hundred_solved)) = _aggregate(neat_stats)
# HyperNEAT.
((hyperneat_one_average_fit, hyperneat_ten_average_fit, hyperneat_hundred_average_fit),
 (hyperneat_run_one_fitnesses, hyperneat_run_ten_fitnesses, hyperneat_run_hundred_fitnesses),
 (hyperneat_one_solved, hyperneat_ten_solved, hyperneat_hundred_solved)) = _aggregate(hyperneat_stats)
# ES-HyperNEAT small.
((es_hyperneat_small_one_average_fit, es_hyperneat_small_ten_average_fit, es_hyperneat_small_hundred_average_fit),
 (es_hyperneat_small_run_one_fitnesses, es_hyperneat_small_run_ten_fitnesses, es_hyperneat_small_run_hundred_fitnesses),
 (es_hyperneat_small_one_solved, es_hyperneat_small_ten_solved, es_hyperneat_small_hundred_solved)) = _aggregate(es_hyperneat_small_stats)
# ES-HyperNEAT medium.
((es_hyperneat_medium_one_average_fit, es_hyperneat_medium_ten_average_fit, es_hyperneat_medium_hundred_average_fit),
 (es_hyperneat_medium_run_one_fitnesses, es_hyperneat_medium_run_ten_fitnesses, es_hyperneat_medium_run_hundred_fitnesses),
 (es_hyperneat_medium_one_solved, es_hyperneat_medium_ten_solved, es_hyperneat_medium_hundred_solved)) = _aggregate(es_hyperneat_medium_stats)
# ES-HyperNEAT large.
((es_hyperneat_large_one_average_fit, es_hyperneat_large_ten_average_fit, es_hyperneat_large_hundred_average_fit),
 (es_hyperneat_large_run_one_fitnesses, es_hyperneat_large_run_ten_fitnesses, es_hyperneat_large_run_hundred_fitnesses),
 (es_hyperneat_large_one_solved, es_hyperneat_large_ten_solved, es_hyperneat_large_hundred_solved)) = _aggregate(es_hyperneat_large_stats)
# Write fitnesses to files, one report per algorithm variant.
def _write_report(path, label, sections):
    """Write one algorithm's per-run fitnesses and solve summary to *path*.

    sections -- iterable of (episode, run_fits, average_fit, solved):
      episode     -- "one"/"ten"/"hundred" (episode-count label),
      run_fits    -- per-run best fitnesses,
      average_fit -- per-generation average fitness curve,
      solved      -- number of runs that solved the task.
    """
    # The original code opened five files and never closed any of them
    # (a file-handle leak); the context manager guarantees flush + close.
    with open(path, 'w+') as report:
        for episode, run_fits, average_fit, solved in sections:
            report.write(label + " " + episode + "\n")
            for item in run_fits:
                report.write("%s\n" % item)
            if max_fit in average_fit:
                report.write(label + " " + episode +
                             " solves pole_balancing at generation: " +
                             str(average_fit.index(max_fit)))
            else:
                report.write(label + " " + episode +
                             " does not solve pole_balancing with best fitness: " +
                             str(average_fit[gens-1]))
            report.write("\n" + label + " " + episode + " solves pole_balancing in " +
                         str(solved) + " out of " + str(runs) + " runs.\n")

_write_report('neat_pole_balancing_run_fitnesses.txt', "NEAT", [
    ("one", neat_run_one_fitnesses, neat_one_average_fit, neat_one_solved),
    ("ten", neat_run_ten_fitnesses, neat_ten_average_fit, neat_ten_solved),
    ("hundred", neat_run_hundred_fitnesses, neat_hundred_average_fit, neat_hundred_solved),
])
_write_report('hyperneat_pole_balancing_run_fitnesses.txt', "HyperNEAT", [
    ("one", hyperneat_run_one_fitnesses, hyperneat_one_average_fit, hyperneat_one_solved),
    ("ten", hyperneat_run_ten_fitnesses, hyperneat_ten_average_fit, hyperneat_ten_solved),
    ("hundred", hyperneat_run_hundred_fitnesses, hyperneat_hundred_average_fit, hyperneat_hundred_solved),
])
_write_report('es_hyperneat_pole_balancing_small_run_fitnesses.txt', "ES-HyperNEAT small", [
    ("one", es_hyperneat_small_run_one_fitnesses, es_hyperneat_small_one_average_fit, es_hyperneat_small_one_solved),
    ("ten", es_hyperneat_small_run_ten_fitnesses, es_hyperneat_small_ten_average_fit, es_hyperneat_small_ten_solved),
    ("hundred", es_hyperneat_small_run_hundred_fitnesses, es_hyperneat_small_hundred_average_fit, es_hyperneat_small_hundred_solved),
])
_write_report('es_hyperneat_pole_balancing_medium_run_fitnesses.txt', "ES-HyperNEAT medium", [
    ("one", es_hyperneat_medium_run_one_fitnesses, es_hyperneat_medium_one_average_fit, es_hyperneat_medium_one_solved),
    ("ten", es_hyperneat_medium_run_ten_fitnesses, es_hyperneat_medium_ten_average_fit, es_hyperneat_medium_ten_solved),
    ("hundred", es_hyperneat_medium_run_hundred_fitnesses, es_hyperneat_medium_hundred_average_fit, es_hyperneat_medium_hundred_solved),
])
_write_report('es_hyperneat_pole_balancing_large_run_fitnesses.txt', "ES-HyperNEAT large", [
    ("one", es_hyperneat_large_run_one_fitnesses, es_hyperneat_large_one_average_fit, es_hyperneat_large_one_solved),
    ("ten", es_hyperneat_large_run_ten_fitnesses, es_hyperneat_large_ten_average_fit, es_hyperneat_large_ten_solved),
    ("hundred", es_hyperneat_large_run_hundred_fitnesses, es_hyperneat_large_hundred_average_fit, es_hyperneat_large_hundred_solved),
])
# Plot the averaged fitness curves, one figure per evaluation mode.
def _plot_average_fits(episode_label, filename, neat_fit, hyperneat_fit,
                       es_small_fit, es_medium_fit, es_large_fit):
    """Plot one comparison figure for all five algorithms and save it as SVG.

    episode_label -- suffix for the title, e.g. "one episode".
    filename      -- output SVG path.
    *_fit         -- per-generation average fitness curves (length `gens`).
    """
    generations = range(gens)
    plt.plot(generations, neat_fit, 'r-', label="NEAT")
    plt.plot(generations, hyperneat_fit, 'g--', label="HyperNEAT")
    plt.plot(generations, es_small_fit, 'b-.', label="ES-HyperNEAT small")
    plt.plot(generations, es_medium_fit, 'c-.', label="ES-HyperNEAT medium")
    plt.plot(generations, es_large_fit, 'm-.', label="ES-HyperNEAT large")
    plt.title("Average pole_balancing fitnesses " + episode_label)
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    plt.savefig(filename)
    plt.close()

_plot_average_fits("one episode", 'pole_balancing_one_fitnesses.svg',
                   neat_one_average_fit, hyperneat_one_average_fit,
                   es_hyperneat_small_one_average_fit,
                   es_hyperneat_medium_one_average_fit,
                   es_hyperneat_large_one_average_fit)
_plot_average_fits("ten episodes", 'pole_balancing_ten_fitnesses.svg',
                   neat_ten_average_fit, hyperneat_ten_average_fit,
                   es_hyperneat_small_ten_average_fit,
                   es_hyperneat_medium_ten_average_fit,
                   es_hyperneat_large_ten_average_fit)
_plot_average_fits("hundred episodes", 'pole_balancing_hundred_fitnesses.svg',
                   neat_hundred_average_fit, hyperneat_hundred_average_fit,
                   es_hyperneat_small_hundred_average_fit,
                   es_hyperneat_medium_hundred_average_fit,
                   es_hyperneat_large_hundred_average_fit)
| 49.39839
| 153
| 0.731864
| 3,729
| 24,551
| 4.450523
| 0.031912
| 0.111352
| 0.050133
| 0.027115
| 0.938118
| 0.898891
| 0.82785
| 0.751024
| 0.694023
| 0.586949
| 0
| 0.00443
| 0.163374
| 24,551
| 496
| 154
| 49.497984
| 0.803554
| 0.016048
| 0
| 0.491991
| 0
| 0
| 0.166777
| 0.013795
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002288
| false
| 0
| 0.022883
| 0
| 0.025172
| 0.002288
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9cc32e6307652528b4d24d4098dd08d354afda23
| 454
|
py
|
Python
|
ontraportlib/models/sort_dir_enum.py
|
LifePosts/ontraport
|
fb4834e89b897dce3475c89c7e6c34bf8756880e
|
[
"MIT"
] | null | null | null |
ontraportlib/models/sort_dir_enum.py
|
LifePosts/ontraport
|
fb4834e89b897dce3475c89c7e6c34bf8756880e
|
[
"MIT"
] | null | null | null |
ontraportlib/models/sort_dir_enum.py
|
LifePosts/ontraport
|
fb4834e89b897dce3475c89c7e6c34bf8756880e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ontraportlib.models.sort_dir_enum
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ) on 11/14/2017
"""
class SortDirEnum(object):
    """Enumeration of the 'SortDir' sort directions.

    Attributes:
        ASC: sort in ascending order.
        DESC: sort in descending order.
    """

    # Wire values expected by the API.
    ASC = 'asc'
    DESC = 'desc'
| 18.16
| 97
| 0.590308
| 52
| 454
| 5.115385
| 0.711538
| 0.090226
| 0.142857
| 0.172932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034268
| 0.292952
| 454
| 24
| 98
| 18.916667
| 0.794393
| 0.689427
| 0
| 0
| 1
| 0
| 0.09589
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
9cc49692629e3b37e0fc9542527c315936c6a87c
| 276
|
py
|
Python
|
AxePy3Lib/01/textwrap/textwrap_fill.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
AxePy3Lib/01/textwrap/textwrap_fill.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
AxePy3Lib/01/textwrap/textwrap_fill.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
# end_pymotw_header
import textwrap
from textwrap_example import sample_text
# Re-wrap the same sample paragraph at two different line widths to
# demonstrate textwrap.fill.
for width in (50, 40):
    print(textwrap.fill(sample_text, width=width))
| 18.4
| 55
| 0.757246
| 40
| 276
| 5.075
| 0.725
| 0.147783
| 0.167488
| 0.226601
| 0.315271
| 0.315271
| 0
| 0
| 0
| 0
| 0
| 0.040984
| 0.115942
| 276
| 14
| 56
| 19.714286
| 0.790984
| 0.394928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
9ce066e555ecb3359f16e7f599fa26998b432783
| 3,403
|
py
|
Python
|
spdb/spatialdb/test/int_test_AWS_object_store.py
|
jhuapl-boss/spdb
|
44d41e2b7a7b961e55746e1a5527d5419a74c2ce
|
[
"Apache-2.0"
] | 5
|
2016-05-12T19:48:45.000Z
|
2018-11-17T00:15:23.000Z
|
spdb/spatialdb/test/int_test_AWS_object_store.py
|
jhuapl-boss/spdb
|
44d41e2b7a7b961e55746e1a5527d5419a74c2ce
|
[
"Apache-2.0"
] | 5
|
2018-01-15T18:14:42.000Z
|
2020-07-30T21:59:16.000Z
|
spdb/spatialdb/test/int_test_AWS_object_store.py
|
jhuapl-boss/spdb
|
44d41e2b7a7b961e55746e1a5527d5419a74c2ce
|
[
"Apache-2.0"
] | 3
|
2017-09-21T11:40:06.000Z
|
2018-05-14T20:15:40.000Z
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from spdb.spatialdb import AWSObjectStore
from spdb.spatialdb.test.test_AWS_object_store import AWSObjectStoreTestMixin
from spdb.spatialdb.test.setup import AWSSetupLayer
from spdb.project import BossResourceBasic
class AWSObjectStoreTestIntegrationMixin(object):
    """Placeholder integration tests for AWSObjectStore.

    All three test bodies are currently `pass`; the intended test code is
    kept below, commented out, until it can be run against real AWS
    resources.
    """
    # TODO: implement tests here or remove
    def test_put_get_objects_async(self):
        """Method to test putting and getting objects to and from S3"""
        # Intended test, not yet enabled:
        #os = AWSObjectStore(self.object_store_config)
        #cached_cuboid_keys = ["CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13"]
        #fake_data = [b"aaaadddffffaadddfffaadddfff", b"fffddaaffddffdfffaaa"]
        #object_keys = os.cached_cuboid_to_object_keys(cached_cuboid_keys)
        #os.put_objects(object_keys, fake_data)
        #returned_data = os.get_objects_async(object_keys)
        #for rdata, sdata in zip(returned_data, fake_data):
        #    assert rdata == sdata
        pass

    def test_page_in_objects(self):
        """Test method for paging in objects from S3 via lambda"""
        # Intended test, not yet enabled:
        # os = AWSObjectStore(self.object_store_config)
        #
        # cached_cuboid_keys = ["CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13"]
        # page_in_channel = "dummy_channel"
        # kv_config = {"param1": 1, "param2": 2}
        # state_config = {"param1": 1, "param2": 2}
        #
        # object_keys = os.page_in_objects(cached_cuboid_keys,
        #                                  page_in_channel,
        #                                  kv_config,
        #                                  state_config)
        pass

    def test_trigger_page_out(self):
        """Test method for paging out objects to S3 via lambda"""
        # Intended test, not yet enabled.
        # NOTE(review): this sketch still calls page_in_objects even though
        # the test is about paging OUT — confirm the intended API call.
        # os = AWSObjectStore(self.object_store_config)
        #
        # cached_cuboid_keys = ["CACHED-CUBOID&1&1&1&0&0&12", "CACHED-CUBOID&1&1&1&0&0&13"]
        # page_in_channel = "dummy_channel"
        # kv_config = {"param1": 1, "param2": 2}
        # state_config = {"param1": 1, "param2": 2}
        #
        # object_keys = os.page_in_objects(cached_cuboid_keys,
        #                                  page_in_channel,
        #                                  kv_config,
        #                                  state_config)
        pass
class TestAWSObjectStoreInt(AWSObjectStoreTestIntegrationMixin, AWSObjectStoreTestMixin, unittest.TestCase):
    """Integration test case combining the unit and integration mixins."""
    # Test layer whose setup runs once for the whole layer; it provides
    # setup_helper and object_store_config used below.
    layer = AWSSetupLayer

    def setUp(self):
        """Copy per-test fixtures from the layer's setUpClass state."""
        # Setup Data
        self.data = self.layer.setup_helper.get_image8_dict()
        self.resource = BossResourceBasic(self.data)
        self.setup_helper = self.layer.setup_helper
        # Setup config
        self.object_store_config = self.layer.object_store_config
| 37.395604
| 108
| 0.644138
| 422
| 3,403
| 5.007109
| 0.329384
| 0.073829
| 0.045433
| 0.039754
| 0.302414
| 0.280644
| 0.280644
| 0.280644
| 0.280644
| 0.280644
| 0
| 0.027978
| 0.264766
| 3,403
| 90
| 109
| 37.811111
| 0.816547
| 0.631795
| 0
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0
| 1
| 0.210526
| false
| 0.157895
| 0.263158
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
9ce7c44edfb802039a3e75fbd02681c6bc9c65e6
| 783
|
py
|
Python
|
tests/code_sample/todo/string_formats.py
|
brendanator/laziest
|
63098402cf7c3b320e0dfd46ca14d2700ed87056
|
[
"Apache-2.0"
] | 1
|
2020-03-31T11:21:33.000Z
|
2020-03-31T11:21:33.000Z
|
tests/code_sample/todo/string_formats.py
|
brendanator/laziest
|
63098402cf7c3b320e0dfd46ca14d2700ed87056
|
[
"Apache-2.0"
] | null | null | null |
tests/code_sample/todo/string_formats.py
|
brendanator/laziest
|
63098402cf7c3b320e0dfd46ca14d2700ed87056
|
[
"Apache-2.0"
] | null | null | null |
def string_format_s(arg1):
    """Interpolate *arg1* using old-style %-formatting."""
    template = 'this is %s'
    return template % arg1
def string_format(arg1):
    """Interpolate *arg1* using str.format with an auto-numbered field."""
    template = 'this is {}'
    return template.format(arg1)
def string_format_f(arg1):
    """Interpolate *arg1* into a literal via an f-string."""
    message = f'this is {arg1}'
    return message
def string_format_f_multiple(arg1, arg2, arg3):
    """Interpolate three values with a single f-string (arg2 leads)."""
    message = f'{arg2} this is {arg1}! {arg3}'
    return message
def string_format_multiple(arg1, arg2, arg3):
    """Interpolate three values with str.format and auto-numbered fields."""
    template = ' {} this is {}! {}'
    return template.format(arg1, arg2, arg3)
def string_format_named(arg1):
    """Interpolate *arg1* via a named str.format field."""
    fields = {'name': arg1}
    return 'this is {name}'.format(**fields)
def string_format_named_three_args(arg1, arg2, arg3):
    """Interpolate three values via named str.format fields."""
    fields = {'name': arg1, 'first': arg2, 'last': arg3}
    return '{first} this is {name} ! {last}'.format(**fields)
def string_format_with_un_op(arg1, arg2, arg3):
    """Format with named fields, append a suffix, then triple the string."""
    text = '{first} this is {name} ! {last}'.format(name=arg1, first=arg2, last=arg3)
    text = text + '. End.'
    return text * 3
| 23.029412
| 85
| 0.662835
| 120
| 783
| 4.158333
| 0.2
| 0.144289
| 0.240481
| 0.152305
| 0.452906
| 0.200401
| 0.200401
| 0.200401
| 0.200401
| 0.200401
| 0
| 0.051563
| 0.182631
| 783
| 33
| 86
| 23.727273
| 0.728125
| 0
| 0
| 0
| 0
| 0
| 0.208174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.421053
| false
| 0
| 0
| 0.368421
| 0.842105
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
9cfd166f6930ac35c8c87e63aa513d3e78fb241a
| 790
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/cloudformation/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/cloudformation/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/cloudformation/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from botocore.waiter import Waiter
class ChangeSetCreateComplete(Waiter):
    """Type-annotation stub for the CloudFormation ChangeSetCreateComplete waiter."""

    def wait(self, ChangeSetName: str, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        # Annotation-only stub: the body is intentionally empty; the real
        # behavior comes from the botocore Waiter this class annotates.
        pass
class StackCreateComplete(Waiter):
    """Type-annotation stub for the CloudFormation StackCreateComplete waiter."""

    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        # Annotation-only stub; intentionally empty.
        pass
class StackDeleteComplete(Waiter):
    """Type-annotation stub for the CloudFormation StackDeleteComplete waiter."""

    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        # Annotation-only stub; intentionally empty.
        pass
class StackExists(Waiter):
    """Type-annotation stub for the CloudFormation StackExists waiter."""

    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        # Annotation-only stub; intentionally empty.
        pass
class StackUpdateComplete(Waiter):
    """Type-annotation stub for the CloudFormation StackUpdateComplete waiter."""

    def wait(self, StackName: str = None, NextToken: str = None, WaiterConfig: Dict = None):
        # Annotation-only stub; intentionally empty.
        pass
| 28.214286
| 112
| 0.692405
| 91
| 790
| 6.010989
| 0.241758
| 0.127971
| 0.11883
| 0.155393
| 0.672761
| 0.672761
| 0.672761
| 0.672761
| 0.672761
| 0.672761
| 0
| 0
| 0.208861
| 790
| 27
| 113
| 29.259259
| 0.8752
| 0
| 0
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.294118
| 0.117647
| 0
| 0.705882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
140e67d033c79e366acb9a3b2745faa866fab465
| 1,346
|
py
|
Python
|
src/utils/hashing.py
|
DiceNameIsMy/fin-a-log
|
0eb47f9ed9f3fbc205b50b7217fabe67e50cea0e
|
[
"MIT"
] | null | null | null |
src/utils/hashing.py
|
DiceNameIsMy/fin-a-log
|
0eb47f9ed9f3fbc205b50b7217fabe67e50cea0e
|
[
"MIT"
] | null | null | null |
src/utils/hashing.py
|
DiceNameIsMy/fin-a-log
|
0eb47f9ed9f3fbc205b50b7217fabe67e50cea0e
|
[
"MIT"
] | null | null | null |
from secrets import randbelow
from hashids import Hashids
from passlib.context import CryptContext
# Module-wide passlib context: bcrypt hashing, with deprecated schemes auto-flagged.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
class InvalidHash(Exception):
    """Raised when a hashid string cannot be decoded back to an integer id."""
    pass
class InvalidObjectWithID(Exception):
    """Raised when an object handed to IDHasher.encode_obj lacks an ``id`` attribute."""
    pass
class IDHasher:
    """Thin wrapper over ``hashids.Hashids`` that raises domain-specific errors."""

    def __init__(self, salt: str, min_length: int = 0):
        self.hashids = Hashids(salt, min_length=min_length)

    def encode(self, num: int) -> str:
        """Return the opaque hashid string for an integer id."""
        return self.hashids.encode(num)

    def decode(self, hash_id: str) -> int:
        """Reverse :meth:`encode`; raise ``InvalidHash`` if the string is not decodable."""
        decoded = self.hashids.decode(hash_id)
        # Hashids.decode returns an empty tuple for unparseable input.
        if not decoded:
            raise InvalidHash()
        return decoded[0]

    def encode_obj(self, obj):
        """Replace ``obj.id`` with its hashid form and return ``obj``."""
        try:
            obj.id = self.encode(obj.id)
        except AttributeError:
            raise InvalidObjectWithID(f"Object {obj} does not have an `id` attribute")
        return obj
def get_hashid(salt: str, min_length: int = 0) -> IDHasher:
    """Build an :class:`IDHasher` for the given salt.

    Fixed: the return annotation previously claimed ``Hashids``, but the
    function has always returned the ``IDHasher`` wrapper.
    """
    return IDHasher(salt, min_length=min_length)
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Return True when *plain_password* matches the stored bcrypt hash."""
    is_match = pwd_context.verify(plain_password, hashed_password)
    return is_match
def get_password_hash(password: str) -> str:
    """Hash *password* with the module's shared bcrypt context."""
    hashed = pwd_context.hash(password)
    return hashed
def generate_verification_code() -> int:
    """Generate a random 6-digit integer"""
    # randbelow(900000) yields 0..899999; the offset maps that onto 100000..999999.
    offset = 100000
    return offset + randbelow(900000)
| 24.925926
| 86
| 0.679792
| 168
| 1,346
| 5.291667
| 0.392857
| 0.060742
| 0.040495
| 0.035996
| 0.101237
| 0.101237
| 0
| 0
| 0
| 0
| 0
| 0.01518
| 0.216939
| 1,346
| 53
| 87
| 25.396226
| 0.828273
| 0.024517
| 0
| 0.125
| 0
| 0
| 0.044376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.21875
| 0.09375
| 0.125
| 0.65625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
141bbc8596c73b9859075f3010e29ba6684fa308
| 2,688
|
py
|
Python
|
TrackerDash/schemas/api.py
|
wedgieedward/TrackerDash
|
53c3ecc7b9124740f05847dbd235b068601c621e
|
[
"Beerware"
] | null | null | null |
TrackerDash/schemas/api.py
|
wedgieedward/TrackerDash
|
53c3ecc7b9124740f05847dbd235b068601c621e
|
[
"Beerware"
] | 2
|
2015-04-08T23:20:35.000Z
|
2015-04-08T23:21:57.000Z
|
TrackerDash/schemas/api.py
|
wedgieedward/TrackerDash
|
53c3ecc7b9124740f05847dbd235b068601c621e
|
[
"Beerware"
] | null | null | null |
"""
Schemas needed to validate incoming api requests
"""
import colander
# Chart types accepted by the Graph schema's `graph_type` validator.
# Fixed: 'bar' was listed twice; the duplicate was harmless for OneOf
# validation but misleading, and quote styles are now consistent.
SUPPORTED_GRAPHS = (
    'line', 'bar', 'area', 'column', 'scatter', 'pie', 'gauge')
class ShowreelItem(colander.MappingSchema):
    """One showreel entry: a title plus whether it refers to a graph or a dashboard."""
    title = colander.SchemaNode(colander.String())
    item_type = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf(["graph", "dashboard"]))
class ShowreelItems(colander.SequenceSchema):
    """Sequence of :class:`ShowreelItem` mappings."""
    item = ShowreelItem()
class Showreel(colander.MappingSchema):
    """
    Schema for a showreel document: a title, a refresh interval,
    and the list of reels to rotate through.
    """
    title = colander.SchemaNode(colander.String())
    refresh_interval = colander.SchemaNode(colander.Int())
    reels = ShowreelItems()
class GraphDimension(colander.MappingSchema):
    """Grid dimensions for a rendered graph."""
    # Width is restricted to the layout grid's allowed column spans.
    width = colander.SchemaNode(colander.Int(),
                                validator=colander.OneOf([4, 6, 8, 12]))
    height = colander.SchemaNode(colander.Int(),
                                 validator=colander.Range(1, 5))
class DashGraph(colander.MappingSchema):
    """A graph placed on a dashboard: its title and grid dimensions."""
    title = colander.SchemaNode(colander.String())
    dimensions = GraphDimension()
class GraphRow(colander.SequenceSchema):
    """
    a list of graph names

    NOTE(review): despite the wording above, each element is a DashGraph
    mapping (title + dimensions), not a bare name.
    """
    row_data = DashGraph()
class GraphRows(colander.SequenceSchema):
    """
    A list of graph rows
    """
    rows = GraphRow()
class Dashboard(colander.MappingSchema):
    """
    Schema for a dashboard document: a title plus its rows of graphs.
    """
    title = colander.SchemaNode(colander.String())
    row_data = GraphRows()
class DataRange(colander.MappingSchema):
    """
    Schema for a data range dictionary.
    Every component defaults to 0 when absent from the request.
    """
    minutes = colander.SchemaNode(colander.Int(), missing=0)
    hours = colander.SchemaNode(colander.Int(), missing=0)
    days = colander.SchemaNode(colander.Int(), missing=0)
    weeks = colander.SchemaNode(colander.Int(), missing=0)
    seconds = colander.SchemaNode(colander.Int(), missing=0)
class Graph(colander.MappingSchema):
    """
    Schema for a graph document
    """
    title = colander.SchemaNode(colander.String())
    data_source = colander.SchemaNode(colander.String())
    # Optional args
    description = colander.SchemaNode(colander.String(), missing="")
    # NOTE(review): this `missing` dict is a single shared object across all
    # deserializations; safe if treated as read-only, but mutating the
    # deserialized default would leak across requests -- confirm usage.
    data_range = DataRange(missing={"minutes": 0,
                                    "hours": 0,
                                    "days": 0,
                                    "weeks": 1,
                                    "seconds": 0})
    graph_type = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf(SUPPORTED_GRAPHS),
        missing="line")
    stacked = colander.SchemaNode(colander.Bool(), missing=False)
    url = colander.SchemaNode(colander.String(), missing='')
| 27.71134
| 72
| 0.630208
| 257
| 2,688
| 6.55642
| 0.303502
| 0.202967
| 0.293175
| 0.189911
| 0.542433
| 0.422552
| 0.137685
| 0.068843
| 0
| 0
| 0
| 0.00835
| 0.24256
| 2,688
| 96
| 73
| 28
| 0.819253
| 0.08631
| 0
| 0.14
| 0
| 0
| 0.034351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02
| 0
| 0.74
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
142617c1149fb198c94820251dde11dd708ad448
| 229
|
py
|
Python
|
djangoapi/courses/serializer.py
|
ptyadana/django-REST-API-course-info
|
9247d5085e28418053975b0800fd42786b6742be
|
[
"MIT"
] | null | null | null |
djangoapi/courses/serializer.py
|
ptyadana/django-REST-API-course-info
|
9247d5085e28418053975b0800fd42786b6742be
|
[
"MIT"
] | null | null | null |
djangoapi/courses/serializer.py
|
ptyadana/django-REST-API-course-info
|
9247d5085e28418053975b0800fd42786b6742be
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Course
class CourseSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked DRF serializer exposing the Course model's public fields."""
    class Meta:
        model = Course
        # NOTE(review): 'langauge' looks like a typo for 'language', but the
        # string must match the actual field name on the Course model --
        # confirm against the model before renaming.
        fields = ('id', 'url', 'name', 'langauge', 'price')
| 32.714286
| 63
| 0.71179
| 23
| 229
| 7.043478
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183406
| 229
| 7
| 64
| 32.714286
| 0.86631
| 0
| 0
| 0
| 0
| 0
| 0.095652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
14355fd96dad21ad9be55c6f3e4a53c0f0123d16
| 28
|
py
|
Python
|
venv-lib/lib/python3.7/copy.py
|
migmaciasdiaz/venvs
|
bcdbb75931cb27fc4b5b30f12fc44be85952157e
|
[
"MIT"
] | 2
|
2020-03-30T14:17:10.000Z
|
2020-10-04T12:33:00.000Z
|
venv-lib/lib/python3.7/copy.py
|
migmaciasdiaz/venvs
|
bcdbb75931cb27fc4b5b30f12fc44be85952157e
|
[
"MIT"
] | 1
|
2020-11-24T03:31:13.000Z
|
2020-11-24T03:31:13.000Z
|
venv/lib/python3.7/copy.py
|
wensu425/aws-eb-webapp
|
4b149c75c11fe5b33c9a080313ec336fabb45824
|
[
"MIT"
] | 1
|
2021-05-04T09:18:22.000Z
|
2021-05-04T09:18:22.000Z
|
/usr/lib64/python3.7/copy.py
| 28
| 28
| 0.785714
| 6
| 28
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 28
| 1
| 28
| 28
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1444a0098bfc98d4d2642192bfe348fc5d1b4df5
| 8,683
|
py
|
Python
|
avlDecoder.py
|
karticr/Teltonika_FMBXXX_TCP_Server
|
130a91c033ca0335e48b9aa275186103fc221a75
|
[
"Apache-2.0"
] | 5
|
2021-01-05T03:14:00.000Z
|
2021-09-03T21:50:44.000Z
|
avlDecoder.py
|
karticr/Teltonika_FMBXXX_TCP_Server
|
130a91c033ca0335e48b9aa275186103fc221a75
|
[
"Apache-2.0"
] | 3
|
2021-05-01T14:56:05.000Z
|
2022-03-03T09:53:27.000Z
|
avlDecoder.py
|
karticr/Teltonika_FMBXXX_TCP_Server
|
130a91c033ca0335e48b9aa275186103fc221a75
|
[
"Apache-2.0"
] | 1
|
2022-02-10T04:38:18.000Z
|
2022-02-10T04:38:18.000Z
|
import binascii
import datetime
import math
from IO_decoder import IODecoder
io = IODecoder()
class avlDecoder():
    """Decoder for Teltonika FMB-series AVL frames (Codec 8) given as a hex string.

    All offsets below are in hex characters: the payload arrives hex-encoded,
    so every byte offset from the protocol is doubled.
    NOTE(review): class name is lowerCamel; left unchanged because external
    code instantiates ``avlDecoder()`` by this exact name.
    """
    def __init__(self):
        self.raw_data = ""
        self.initVars()
    def initVars(self): # reset every decoded field to its zero value
        # NOTE(review): the two counters here use the plural spelling
        # `no_records_*`, while decodeAVL() assigns `no_record_i` /
        # `no_record_e` (singular). The plural attributes are never read
        # again in this class -- confirm which spelling callers expect.
        self.codecid = 0
        self.no_records_i = 0
        self.no_records_e = 0
        self.crc_16 = 0
        self.avl_entries = []
        self.avl_latest = ""
        self.d_time_unix = 0
        self.d_time_local = ""
        self.avl_io_raw = ""
        self.priority = 0
        self.lon = 0
        self.lat = 0
        self.alt = 0
        self.angle = 0
        self.satellites = 0
        self.speed = 0
        self.decoded_io = {}
    def decodeAVL(self, data):
        """Decode one frame; return the AVL dict, or -1 when the frame is not
        Codec 8 or its leading/trailing record counts disagree."""
        self.raw_data = data
        self.data_field_l = int(data[8:16],16)*2 # data field length in hex chars (*2 converts bytes to hex chars)
        self.total_io_size = self.data_field_l-4-2 # minus codec id + leading count (4) and trailing count (2)
        self.io_end = 20+self.total_io_size # records start at hex offset 20
        self.codecid = int(data[16:18], 16) # codec id; only 8 is handled below
        self.no_record_i = int(data[18:20], 16) # leading record count
        self.no_record_e = int(data[-10:-8], 16) # trailing record count (just before the CRC)
        self.crc_16 = int(data[-8:],16) # CRC-16 field (read here, not verified)
        self.first_io_start= 20 # hex offset of the first record
        self.first_io_end = math.ceil(self.total_io_size/ self.no_record_e) # assumes equal-sized records -- TODO confirm for variable IO counts
        if(self.codecid == 8 and (self.no_record_i == self.no_record_e)):
            # record_entries = data[20:-10] # entry data
            record_entries = data[self.first_io_start: self.io_end ] # all record hex, preamble/CRC stripped
            entries_size = len(record_entries) # total hex chars across records
            division_size = int(len(record_entries)/ self.no_record_i) # hex chars per record (equal-size assumption)
            self.avl_entries = []
            print("old size:", entries_size, "division:", division_size)
            print("new size:", self.total_io_size, "division:", self.total_io_size/ self.no_record_e)
            for i in range(0, entries_size, division_size):
                self.avl_entries.append(record_entries[i:i+division_size]) # split blob into per-record chunks
            self.avl_latest = record_entries[0:self.first_io_end] # first record of the frame
            self.avl_latest_1 = self.avl_entries[0]
            print("________________________________________")
            print("old:", self.avl_entries[0])
            print("new:", self.avl_latest)
            print("‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾")
            # Fixed-offset fields of the first record (offsets in hex chars).
            self.d_time_unix = int(self.avl_latest[0:16],16) # timestamp, ms since epoch (see unixtoLocal)
            self.d_time_local = self.unixtoLocal(self.d_time_unix) # formatted local time
            self.priority = int(record_entries[16:18], 16) # record priority
            self.lon = int(record_entries[18:26], 16) # longitude (raw integer)
            self.lat = int(record_entries[26:34], 16) # latitude (raw integer)
            self.alt = int(record_entries[34:38], 16) # altitude
            self.angle = int(record_entries[38:42], 16) # heading angle
            self.satellites = int(record_entries[42:44], 16) # satellite count
            self.speed = int(record_entries[44:48], 16) # speed
            self.avl_io_raw = self.avl_latest[48:] # remaining hex: the IO element block
            print("raw io",self.avl_io_raw)
            self.decoded_io = io.dataDecoder(self.avl_io_raw) # delegate IO parsing to the module-level IODecoder
            return self.getAvlData()
        else:
            return -1
    def getDateTime(self): # current system time as "dd/mm/YYYY HH:MM:SS"
        return datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    def unixtoLocal(self, unix_time): # millisecond unix timestamp -> local "YYYY-mm-dd HH:MM:SS"
        time = datetime.datetime.fromtimestamp(unix_time/1000)
        return f"{time:%Y-%m-%d %H:%M:%S}"
    def getAvlData(self):
        """Assemble the most recently decoded record into a plain dict."""
        data = {
            "sys_time" : self.getDateTime(),
            "codecid" : self.codecid,
            "no_record_i": self.no_record_i,
            "no_record_e": self.no_record_e,
            "crc-16" : self.crc_16,
            # "avl_entries": self.avl_entries,
            # "avl_latest" : self.avl_latest,
            "d_time_unix" : self.d_time_unix,
            "d_time_local": self.d_time_local,
            "priority" :self.priority,
            "lon" :self.lon,
            "lat" :self.lat,
            "alt" :self.alt,
            "angle" :self.angle,
            "satellites" :self.satellites,
            "speed" :self.speed,
            "io_data" :self.decoded_io
        }
        return data
    def getRawData(self):
        """Return the last raw hex payload passed to decodeAVL."""
        return self.raw_data
if __name__ == "__main__":
data = b'00000000000004d2081d00000176ccb789480000000000000000000000000000000000060301000200b40002422dea430f150148000000000000000176ccb69ee80000000000000000000000000000000000060301000200b40002422de8430f150148000000000000000176ccb5b4880000000000000000000000000000000000060301000200b40002422de6430f160148000000000000000176ccb4ca280000000000000000000000000000000000060301000200b40002422de6430f130148000000000000000176ccb3dfc80000000000000000000000000000000000060301000200b40002422de6430f160148000000000000000176ccb2f5680000000000000000000000000000000000060301000200b40002422de6430f110148000000000000000176ccb20b080000000000000000000000000000000000060301000200b40002422de4430f110148000000000000000176cc96f1880000000000000000000000000000000000040301000200b400000148000000000000000176cc9607280000000000000000000000000000000000040301000200b400000148000000000000000176cc951cc80000000000000000000000000000000000040301000200b400000148000000000000000176cc9432680000000000000000000000000000000000040301000200b400000148000000000000000176cc9348080000000000000000000000000000000000040301000200b400000148000000000000000176cc925da80000000000000000000000000000000000040301000200b400000148000000000000000176cc9173480000000000000000000000000000000000040301000200b400000148000000000000000176cc900be80000000000000000000000000000000000040301000200b400000148000000000000000176cc8f96b80000000000000000000000000000000000040301000200b400000148000000000000000176cc8eac580000000000000000000000000000000000040301000200b400000148000000000000000176cc8d4cc80200000000000000000000000000000002040301000200b400000148000000000000000176cc8d06780000000000000000000000000000000000040301000200b400000148000000000000000176cc8c1c180000000000000000000000000000000000040301000200b400000148000000000000000176cc8b31b80000000000000000000000000000000000040301000200b400000148000000000000000176cc8a47580000000000000000000000000000000000040301000200b400000148000000000000000176cc895cf80000000000000000000000000000000000040301000200b40000014800000000000
0000176cc8872980000000000000000000000000000000000040301000200b400000148000000000000000176cc8788380000000000000000000000000000000000040301000200b400000148000000000000000176cc869dd80000000000000000000000000000000000040301000200b400000148000000000000000176cc85b3780000000000000000000000000000000000040301000200b400000148000000000000000176cc84c9180000000000000000000000000000000000040301000200b400000148000000000000000176cc83deb80000000000000000000000000000000000040301000200b40000014800000000001d000027ca'
# data = b'000000000000003608010000016B40D8EA30010000000000000000000000000000000105021503010101425E0F01F10000601A014E0000000000000000010000C7CF'
# data = b'000000000000004308020000016B40D57B480100000000000000000000000000000001010101000000000000016B40D5C198010000000000000000000000000000000101010101000000020000252C'
avl = avlDecoder()
res = avl.decodeAVL(data)
print(res)
# avldata = avl.getAvlData()
# print(avldata)
| 66.282443
| 2,506
| 0.64609
| 699
| 8,683
| 7.829757
| 0.184549
| 0.014252
| 0.02083
| 0.027042
| 0.064681
| 0.025215
| 0.017541
| 0.017541
| 0.007309
| 0.007309
| 0
| 0.43913
| 0.284809
| 8,683
| 130
| 2,507
| 66.792308
| 0.435588
| 0.126569
| 0
| 0.020202
| 0
| 0
| 0.368588
| 0.340888
| 0
| 1
| 0
| 0
| 0
| 1
| 0.070707
| false
| 0
| 0.040404
| 0.020202
| 0.181818
| 0.080808
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1461c579bd796a534c823ae58d48b47fae36abdb
| 95
|
py
|
Python
|
src/mozloc/__init__.py
|
scivision/mozilla-location-wifi
|
fb41aa88bd89f0734c34fdd14fb3db0697d9b88a
|
[
"MIT"
] | 4
|
2020-11-23T06:25:55.000Z
|
2021-11-04T02:11:53.000Z
|
src/mozloc/__init__.py
|
scivision/mozilla-location-wifi-python
|
fb41aa88bd89f0734c34fdd14fb3db0697d9b88a
|
[
"MIT"
] | null | null | null |
src/mozloc/__init__.py
|
scivision/mozilla-location-wifi-python
|
fb41aa88bd89f0734c34fdd14fb3db0697d9b88a
|
[
"MIT"
] | null | null | null |
from .base import log_wifi_loc
from .modules import get_signal, parse_signal, cli_config_check
| 31.666667
| 63
| 0.852632
| 16
| 95
| 4.6875
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 95
| 2
| 64
| 47.5
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1463af8b1fc3a452cf428a092fc4c5e6f04b24b2
| 238
|
py
|
Python
|
Section02_Builder/BuilderInheritance/PersonBirthDateBuilder.py
|
enriqueescobar-askida/Kinito.Python
|
e4c5521e771c4de0ceaf81776a4a61f7de01edb4
|
[
"MIT"
] | 1
|
2020-10-20T07:41:51.000Z
|
2020-10-20T07:41:51.000Z
|
Section02_Builder/BuilderInheritance/PersonBirthDateBuilder.py
|
enriqueescobar-askida/Kinito.Python
|
e4c5521e771c4de0ceaf81776a4a61f7de01edb4
|
[
"MIT"
] | null | null | null |
Section02_Builder/BuilderInheritance/PersonBirthDateBuilder.py
|
enriqueescobar-askida/Kinito.Python
|
e4c5521e771c4de0ceaf81776a4a61f7de01edb4
|
[
"MIT"
] | null | null | null |
from Section02_Builder.BuilderInheritance.PersonJobBuilder import PersonJobBuilder
class PersonBirthDateBuilder(PersonJobBuilder):
    """Builder step that records a person's birth date (fluent interface)."""

    def born(self, date_of_birth):
        """Set the birth date on the person under construction; return the builder."""
        person = self.person
        person.date_of_birth = date_of_birth
        return self
| 29.75
| 82
| 0.798319
| 26
| 238
| 7.038462
| 0.615385
| 0.098361
| 0.180328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.151261
| 238
| 7
| 83
| 34
| 0.89604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
147997bd302efc04f1376c6194416d2d473ad26b
| 38
|
py
|
Python
|
ctrl-hyper/ctrl-r.py
|
MTfirst/cmd-ctrl_onLinux
|
38a6db67796bdc8d438ca63171d9fea03e84f5f7
|
[
"MIT"
] | 1
|
2020-05-02T03:46:10.000Z
|
2020-05-02T03:46:10.000Z
|
ctrl-hyper/ctrl-r.py
|
MTfirst/cmd-ctrl_onLinux
|
38a6db67796bdc8d438ca63171d9fea03e84f5f7
|
[
"MIT"
] | null | null | null |
ctrl-hyper/ctrl-r.py
|
MTfirst/cmd-ctrl_onLinux
|
38a6db67796bdc8d438ca63171d9fea03e84f5f7
|
[
"MIT"
] | null | null | null |
# Send the Ctrl+Shift+R chord to the focused window.
# NOTE(review): `keyboard` appears to be injected by the AutoKey runtime
# (no import in this script) -- confirm the execution environment.
keyboard.send_keys("<ctrl>+<shift>+r")
| 38
| 38
| 0.710526
| 6
| 38
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 1
| 38
| 38
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
14986af2d1b45efa6e844fc93b02dbd9e29b1629
| 242
|
py
|
Python
|
python/analysis/beam_ana/__init__.py
|
ACTCollaboration/moby2
|
b0f6bd6add7170999eb964d18f16d795520426e9
|
[
"BSD-2-Clause"
] | 3
|
2020-06-23T15:59:37.000Z
|
2022-03-29T16:04:35.000Z
|
python/analysis/beam_ana/__init__.py
|
ACTCollaboration/moby2
|
b0f6bd6add7170999eb964d18f16d795520426e9
|
[
"BSD-2-Clause"
] | 1
|
2020-04-08T15:10:46.000Z
|
2020-04-08T15:10:46.000Z
|
python/analysis/beam_ana/__init__.py
|
ACTCollaboration/moby2
|
b0f6bd6add7170999eb964d18f16d795520426e9
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
from .beam_obs import BeamObs, BeamObsList
from .beam_plot import plot_beam_image
from . import solid_angle
from . import util
| 26.888889
| 42
| 0.85124
| 35
| 242
| 5.457143
| 0.514286
| 0.104712
| 0.167539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123967
| 242
| 8
| 43
| 30.25
| 0.900943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
14ae89744d23180ef351421ffe231d8cbfbc615a
| 22
|
py
|
Python
|
notebooks/dminteract/version.py
|
chapmanbe/isys90069_w2020_explore
|
305a129ee32035b62741fed2eb722aa4086a1167
|
[
"MIT"
] | 8
|
2020-12-20T02:59:59.000Z
|
2021-09-23T06:04:01.000Z
|
notebooks/dminteract/version.py
|
chapmanbe/isys90069_w2020_explore
|
305a129ee32035b62741fed2eb722aa4086a1167
|
[
"MIT"
] | 5
|
2021-06-08T21:54:49.000Z
|
2022-03-12T00:38:51.000Z
|
notebooks/dminteract/version.py
|
chapmanbe/isys90069_w2020_explore
|
305a129ee32035b62741fed2eb722aa4086a1167
|
[
"MIT"
] | 9
|
2020-06-26T06:00:15.000Z
|
2022-01-06T04:07:38.000Z
|
__version__="0.0.1.5"
| 11
| 21
| 0.681818
| 5
| 22
| 2.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.045455
| 22
| 1
| 22
| 22
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
14bf30645e89a98e38db0b7d2622f85c989d6c58
| 111
|
py
|
Python
|
tests/pipe_proc_tests/dev.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 150
|
2015-01-16T12:24:13.000Z
|
2022-03-03T18:01:18.000Z
|
tests/pipe_proc_tests/dev.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 129
|
2015-01-13T04:58:56.000Z
|
2022-03-02T13:39:16.000Z
|
tests/pipe_proc_tests/dev.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 88
|
2015-02-16T20:04:12.000Z
|
2022-03-10T06:50:30.000Z
|
#! /usr/bin/env python
""" Create files for dev unit test """
# do nothing as NMRPipe goes into infinite loop.
| 27.75
| 48
| 0.702703
| 18
| 111
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18018
| 111
| 3
| 49
| 37
| 0.857143
| 0.900901
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1ae79a0e222d499f9abf55654da6499867c03d60
| 277
|
py
|
Python
|
warpfield/__init__.py
|
xr0038/jasmine_warpfield
|
d3dc8306c30c955eea997e7cb69c1910df6a9515
|
[
"MIT"
] | null | null | null |
warpfield/__init__.py
|
xr0038/jasmine_warpfield
|
d3dc8306c30c955eea997e7cb69c1910df6a9515
|
[
"MIT"
] | 7
|
2021-07-04T07:07:34.000Z
|
2021-09-09T05:22:09.000Z
|
warpfield/__init__.py
|
xr0038/jasmine_warpfield
|
d3dc8306c30c955eea997e7cb69c1910df6a9515
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .util import get_projection
from .source import retrieve_gaia_sources
from .source import display_sources, display_gaia_sources
from .telescope import Optics, Detector, Telescope
# from .distortion import distortion_generator
| 34.625
| 57
| 0.805054
| 37
| 277
| 5.837838
| 0.594595
| 0.092593
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004065
| 0.111913
| 277
| 7
| 58
| 39.571429
| 0.873984
| 0.314079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1aea61bcc9df6cd17982b810157a304388bea2e1
| 48
|
py
|
Python
|
hello.py
|
AbhijeetSrivastava96/Python-Programs
|
d5a7ff9698150a5bc809129214a0b3edc0fa9d91
|
[
"MIT"
] | null | null | null |
hello.py
|
AbhijeetSrivastava96/Python-Programs
|
d5a7ff9698150a5bc809129214a0b3edc0fa9d91
|
[
"MIT"
] | null | null | null |
hello.py
|
AbhijeetSrivastava96/Python-Programs
|
d5a7ff9698150a5bc809129214a0b3edc0fa9d91
|
[
"MIT"
] | null | null | null |
# Add two constants and report their sum.
a = 4
b = 5
c = a + b
print("the value of a+b is : ", c)
| 9.6
| 33
| 0.5625
| 15
| 48
| 1.8
| 0.666667
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.208333
| 48
| 4
| 34
| 12
| 0.657895
| 0
| 0
| 0
| 0
| 0
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
210643efde344f1910eb7a98b108382fc7041c16
| 330
|
py
|
Python
|
anidbcli/__init__.py
|
infinityb/anidbcli
|
fb5ea89b3690190f8f92f23111b44c3cfade92c1
|
[
"MIT"
] | null | null | null |
anidbcli/__init__.py
|
infinityb/anidbcli
|
fb5ea89b3690190f8f92f23111b44c3cfade92c1
|
[
"MIT"
] | null | null | null |
anidbcli/__init__.py
|
infinityb/anidbcli
|
fb5ea89b3690190f8f92f23111b44c3cfade92c1
|
[
"MIT"
] | null | null | null |
from .anidbconnector import AnidbConnector
from .libed2k import get_ed2k_link,hash_file
from .cli import main
from .protocol import FileRequest, AnimeAmaskField, FileFmaskField, FileAmaskField
# Public API re-exported by the anidbcli package.
__all__ = ['AnidbConnector', "FileRequest", "AnimeAmaskField", "FileFmaskField", "FileAmaskField", "main", "hash_file", "get_ed2k_link"]
| 55
| 136
| 0.809091
| 35
| 330
| 7.342857
| 0.485714
| 0.054475
| 0.085603
| 0.420233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009967
| 0.087879
| 330
| 6
| 136
| 55
| 0.843854
| 0
| 0
| 0
| 0
| 0
| 0.283988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
212f11894e29ef6f76020614604e0276162d9dd3
| 119
|
py
|
Python
|
awspice/__init__.py
|
Telefonica/awspice
|
da6f6ee0a8d7a7206c1ea5e7ca8bbc83716b29fb
|
[
"Apache-2.0"
] | 1
|
2020-08-04T18:22:41.000Z
|
2020-08-04T18:22:41.000Z
|
awspice/__init__.py
|
Telefonica/awspice
|
da6f6ee0a8d7a7206c1ea5e7ca8bbc83716b29fb
|
[
"Apache-2.0"
] | null | null | null |
awspice/__init__.py
|
Telefonica/awspice
|
da6f6ee0a8d7a7206c1ea5e7ca8bbc83716b29fb
|
[
"Apache-2.0"
] | 2
|
2019-04-03T16:56:19.000Z
|
2019-05-06T19:41:26.000Z
|
from .manager import AwsManager as connect
from .servicemanager import ServiceManager
from .helpers import ClsEncoder
| 23.8
| 42
| 0.848739
| 14
| 119
| 7.214286
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12605
| 119
| 4
| 43
| 29.75
| 0.971154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2135f82bed51051638952f0f902530653e48bfe9
| 554
|
py
|
Python
|
src/purgatory/domain/messages/events.py
|
mardiros/purgatory
|
5905619c0f153eae090c46ed5cd7f165c86eafd5
|
[
"BSD-3-Clause"
] | null | null | null |
src/purgatory/domain/messages/events.py
|
mardiros/purgatory
|
5905619c0f153eae090c46ed5cd7f165c86eafd5
|
[
"BSD-3-Clause"
] | 11
|
2021-12-29T21:28:50.000Z
|
2022-01-17T08:09:38.000Z
|
src/purgatory/domain/messages/events.py
|
mardiros/purgatory
|
5905619c0f153eae090c46ed5cd7f165c86eafd5
|
[
"BSD-3-Clause"
] | null | null | null |
from dataclasses import dataclass
from typing import Optional
from purgatory.typing import TTL, StateName, Threshold
from .base import Event
@dataclass(frozen=True)
class CircuitBreakerCreated(Event):
    """Domain event: a new circuit breaker was registered."""
    name: str
    threshold: Threshold
    ttl: TTL
@dataclass(frozen=True)
class ContextChanged(Event):
    """Domain event: a circuit breaker's state changed."""
    name: str
    state: StateName
    opened_at: Optional[float]  # presumably the opening timestamp, None otherwise -- confirm with emitter
@dataclass(frozen=True)
class CircuitBreakerFailed(Event):
    """Domain event: a call through the circuit breaker failed."""
    name: str
    failure_count: int  # current failure tally -- confirm pre/post-increment semantics
@dataclass(frozen=True)
class CircuitBreakerRecovered(Event):
    """Domain event: the circuit breaker recovered."""
    name: str
| 17.3125
| 54
| 0.752708
| 64
| 554
| 6.484375
| 0.4375
| 0.144578
| 0.183133
| 0.231325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169675
| 554
| 31
| 55
| 17.870968
| 0.902174
| 0
| 0
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.190476
| 0
| 0.809524
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
2148c24bf5b42cbd93b875b82aa1b5edb5e85002
| 193
|
py
|
Python
|
leads/apps.py
|
coderj001/Django-CRM
|
7cca0df5d39b92082781047c1f0a11129179f257
|
[
"MIT"
] | null | null | null |
leads/apps.py
|
coderj001/Django-CRM
|
7cca0df5d39b92082781047c1f0a11129179f257
|
[
"MIT"
] | null | null | null |
leads/apps.py
|
coderj001/Django-CRM
|
7cca0df5d39b92082781047c1f0a11129179f257
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class LeadsConfig(AppConfig):
    """Django application configuration for the `leads` app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'leads'
    def ready(self):
        # Imported for its side effects: registers the app's signal handlers.
        import leads.signals
| 19.3
| 56
| 0.709845
| 23
| 193
| 5.869565
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202073
| 193
| 9
| 57
| 21.444444
| 0.876623
| 0
| 0
| 0
| 0
| 0
| 0.176166
| 0.150259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.