hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7e6eab88b7182b0e458a8868bb7157683002779
| 37
|
py
|
Python
|
pynotifier/__init__.py
|
Atharva-Dev/pynotifier
|
07ee7f242faaf80763f6a0e5651ae8e607ae75d3
|
[
"MIT"
] | null | null | null |
pynotifier/__init__.py
|
Atharva-Dev/pynotifier
|
07ee7f242faaf80763f6a0e5651ae8e607ae75d3
|
[
"MIT"
] | null | null | null |
pynotifier/__init__.py
|
Atharva-Dev/pynotifier
|
07ee7f242faaf80763f6a0e5651ae8e607ae75d3
|
[
"MIT"
] | null | null | null |
from .pynotifier import Notification
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
791c831685b5f2b634575445c8d3a1a4ef68c1b8
| 5,701
|
py
|
Python
|
fp_pred_group/preprocessor/preprocess_feat.py
|
ndkgit339/filledpause_prediction_group
|
db511c081f155ec2c23afe82bc44c03c38618590
|
[
"MIT"
] | 1
|
2022-02-25T01:03:40.000Z
|
2022-02-25T01:03:40.000Z
|
fp_pred_group/preprocessor/preprocess_feat.py
|
ndkgit339/filledpause_prediction_group
|
db511c081f155ec2c23afe82bc44c03c38618590
|
[
"MIT"
] | null | null | null |
fp_pred_group/preprocessor/preprocess_feat.py
|
ndkgit339/filledpause_prediction_group
|
db511c081f155ec2c23afe82bc44c03c38618590
|
[
"MIT"
] | null | null | null |
import time
from pathlib import Path
from tqdm import tqdm
import hydra
from omegaconf import DictConfig
# 言語処理
# import fasttext
# import fasttext.util
from transformers import BertTokenizer, BertModel
# データ処理
import numpy as np
import torch
def extract_feats(config):
start = time.time()
# FPs
with open(config.fp_list_path, "r") as f:
fp_list = [l.strip() for l in f]
# Prepare bert
bert_model_dir = Path(config.bert_model_dir)
vocab_file_path = bert_model_dir / "vocab.txt"
bert_tokenizer = BertTokenizer(vocab_file_path, do_lower_case=False, do_basic_tokenize=False)
bert_model = BertModel.from_pretrained(bert_model_dir)
bert_model.eval()
def preprocess_ipu(speaker_id, koen_id, ipu_id, ipu_tagtext, in_dir, out_dir):
# get tokens and fp labels
fp_labels = [0] # fps sometimes appear at the head of the breath group
tokens = ["[CLS]"]
for m in ipu_tagtext.split(" "):
if m.startswith("(F"):
fp = m.split("(F")[1].split(")")[0]
if fp in fp_list:
fp_labels[-1] = fp_list.index(fp) + 1
elif m != "":
tokens.append(m)
fp_labels.append(0)
tokens += ["[SEP]"]
fp_labels.append(0)
# get embedding
token_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
token_tensor = torch.Tensor(token_ids).unsqueeze(0).to(torch.long)
outputs = bert_model(token_tensor)
outputs_numpy = outputs[0].numpy().squeeze(axis=0).copy()
assert outputs_numpy.shape[0] == np.array(fp_labels).shape[0], \
"1st array length {} should be equal to 2nd array length {}".format(
outputs_numpy.shape[0], np.array(fp_labels).shape[0])
np.save(in_dir / f"{speaker_id}-{koen_id}-{ipu_id}-feats.npy", outputs_numpy)
np.save(out_dir / f"{speaker_id}-{koen_id}-{ipu_id}-feats.npy", np.array(fp_labels))
# extraxt features
infeats_dir = Path(config.out_dir) / "infeats"
outfeats_dir = Path(config.out_dir) / "outfeats"
infeats_dir.mkdir(parents=True, exist_ok=True)
outfeats_dir.mkdir(parents=True, exist_ok=True)
with open(Path(config.out_dir) / f"ipu.list", "r") as f:
ipus = [tuple(l.split(":")) for l in f.readlines()]
with torch.no_grad():
for speaker_id, koen_id, ipu_id, ipu in tqdm(ipus):
preprocess_ipu(speaker_id, koen_id, ipu_id, ipu, infeats_dir, outfeats_dir)
# count time
n_ipu = len(ipus)
elapsed_time = time.time() - start
time_log = "elapsed_time of feature extraction: {} [sec]".format(elapsed_time)
time_log_ipu = "elapsed_time of feature extraction (per IPU): \
{} [sec]".format(elapsed_time / n_ipu)
print(time_log + "\n" + time_log_ipu)
with open(Path(config.out_dir) / "time.log", "w") as f:
f.write(time_log + "\n" + time_log_ipu)
def extract_feats_test(data_dir, fp_list_path, bert_model_dir, utt_list_name):
start = time.time()
# FPs
with open(fp_list_path, "r") as f:
fp_list = [l.strip() for l in f]
# Prepare bert
bert_model_dir = Path(bert_model_dir)
vocab_file_path = bert_model_dir / "vocab.txt"
bert_tokenizer = BertTokenizer(
vocab_file_path, do_lower_case=False, do_basic_tokenize=False)
bert_model = BertModel.from_pretrained(bert_model_dir)
bert_model.eval()
def preprocess_utt(utt_id, utt, in_dir, out_dir):
# get tokens and fp labels
fp_labels = [0] # fps sometimes appear at the head of the breath group
tokens = ["[CLS]"]
for m in utt.split(" "):
if m.startswith("(F"):
fp = m.split("(F")[1].split(")")[0]
if fp in fp_list:
fp_labels[-1] = fp_list.index(fp) + 1
elif m != "":
tokens.append(m)
fp_labels.append(0)
tokens += ["[SEP]"]
fp_labels.append(0)
# get embedding
token_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
token_tensor = torch.Tensor(token_ids).unsqueeze(0).to(torch.long)
outputs = bert_model(token_tensor)
outputs_numpy = outputs[0].numpy().squeeze(axis=0).copy()
assert outputs_numpy.shape[0] == np.array(fp_labels).shape[0], \
"1st array length {} should be equal to 2nd array length {}".format(
outputs_numpy.shape[0], np.array(fp_labels).shape[0])
np.save(in_dir / f"{utt_id}-feats.npy", outputs_numpy)
np.save(out_dir / f"{utt_id}-feats.npy", np.array(fp_labels))
# extraxt features
infeats_dir = Path(data_dir) / "infeats"
outfeats_dir = Path(data_dir) / "outfeats"
infeats_dir.mkdir(parents=True, exist_ok=True)
outfeats_dir.mkdir(parents=True, exist_ok=True)
with open(Path(data_dir) / "{}.list".format(utt_list_name), "r") as f:
utts = [tuple(l.split(":")) for l in f.readlines()]
with torch.no_grad():
for utt_id, utt in tqdm(utts):
preprocess_utt(utt_id, utt, infeats_dir, outfeats_dir)
# count time
n_utt = len(utts)
elapsed_time = time.time() - start
time_log ="elapsed_time of feature extraction: {} [sec]".format(elapsed_time)
time_log_utt ="elapsed_time of feature extraction (per utt): \
{} [sec]".format(elapsed_time / n_utt)
print(time_log + "\n" + time_log_utt)
with open(Path(data_dir) / "time.log", "w") as f:
f.write(time_log + "\n" + time_log_utt)
@hydra.main(config_path="conf/preprocess", config_name="config")
def main(config: DictConfig):
extract_feats(config)
if __name__=="__main__":
main()
| 38.52027
| 97
| 0.630591
| 825
| 5,701
| 4.109091
| 0.168485
| 0.037758
| 0.031858
| 0.026549
| 0.842183
| 0.79351
| 0.733038
| 0.707965
| 0.707965
| 0.689676
| 0
| 0.007387
| 0.240133
| 5,701
| 148
| 98
| 38.52027
| 0.775162
| 0.056306
| 0
| 0.509091
| 0
| 0
| 0.089146
| 0.015293
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.045455
| false
| 0
| 0.072727
| 0
| 0.118182
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f76ad4a0c7f1f7e8edb0022ebebcf14e5f8a2da2
| 27
|
py
|
Python
|
gsflow/modsim/__init__.py
|
pygsflow/pygsflow
|
83860cd58078017a65e1633b1192469777f1ce15
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 17
|
2019-11-11T02:49:29.000Z
|
2022-02-17T03:45:19.000Z
|
gsflow/modsim/__init__.py
|
jonathanqv/pygsflow
|
d671fdd84245ecb421a0fcab17a578425b514e93
|
[
"Unlicense"
] | 21
|
2019-07-10T21:45:11.000Z
|
2022-02-22T17:57:20.000Z
|
gsflow/modsim/__init__.py
|
jonathanqv/pygsflow
|
d671fdd84245ecb421a0fcab17a578425b514e93
|
[
"Unlicense"
] | 8
|
2019-11-11T02:49:36.000Z
|
2021-09-30T18:43:45.000Z
|
from .modsim import Modsim
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3a01b0fab9789c74fb9854ab87d1b3a4b62d685
| 74
|
py
|
Python
|
open_horadric_lib/client/middleware.py
|
ERRORthenBSOD/open_horadric_lib
|
7e719057cd5a49382a5cb62dfe67ababb7428552
|
[
"MIT"
] | null | null | null |
open_horadric_lib/client/middleware.py
|
ERRORthenBSOD/open_horadric_lib
|
7e719057cd5a49382a5cb62dfe67ababb7428552
|
[
"MIT"
] | null | null | null |
open_horadric_lib/client/middleware.py
|
ERRORthenBSOD/open_horadric_lib
|
7e719057cd5a49382a5cb62dfe67ababb7428552
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
class BaseClientMiddleware:
pass
| 12.333333
| 34
| 0.810811
| 7
| 74
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 74
| 5
| 35
| 14.8
| 0.918033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e3db66635d4e51b8c5e07a54bc87fc78bd035d92
| 160
|
py
|
Python
|
src/service/domain/builder.py
|
jdiazromeral/django-ddd-quizs-demo
|
3cbb0ece8cc2440fd344069849c132c4b9502f5b
|
[
"MIT"
] | null | null | null |
src/service/domain/builder.py
|
jdiazromeral/django-ddd-quizs-demo
|
3cbb0ece8cc2440fd344069849c132c4b9502f5b
|
[
"MIT"
] | null | null | null |
src/service/domain/builder.py
|
jdiazromeral/django-ddd-quizs-demo
|
3cbb0ece8cc2440fd344069849c132c4b9502f5b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Any, Dict
class Builder(ABC):
@abstractmethod
def build(self, json: Dict) -> Any:
pass
| 17.777778
| 39
| 0.68125
| 21
| 160
| 5.190476
| 0.666667
| 0.311927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2375
| 160
| 8
| 40
| 20
| 0.893443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5420e3a86217a1c886b7829abd138e70e269cd37
| 99
|
py
|
Python
|
models/__init__.py
|
ZFancy/IAD
|
a8091a8c7552cef43d8f3f28085426cb786ce9d3
|
[
"MIT"
] | 6
|
2022-02-11T07:37:48.000Z
|
2022-03-08T09:01:34.000Z
|
models/__init__.py
|
ZFancy/IAD
|
a8091a8c7552cef43d8f3f28085426cb786ce9d3
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
ZFancy/IAD
|
a8091a8c7552cef43d8f3f28085426cb786ce9d3
|
[
"MIT"
] | null | null | null |
from .mobilenetv2 import *
from .wideresnet import *
from .resnet import *
from .preresnet import *
| 24.75
| 26
| 0.767677
| 12
| 99
| 6.333333
| 0.5
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.151515
| 99
| 4
| 27
| 24.75
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
543d3d75da1b3eee80a69f911f00d7139dc7beca
| 61
|
py
|
Python
|
acq4/modules/Manager/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 47
|
2015-01-05T16:18:10.000Z
|
2022-03-16T13:09:30.000Z
|
acq4/modules/Manager/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 48
|
2015-04-19T16:51:41.000Z
|
2022-03-31T14:48:16.000Z
|
acq4/modules/Manager/__init__.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 32
|
2015-01-15T14:11:49.000Z
|
2021-07-15T13:44:52.000Z
|
from __future__ import print_function
from .Manager import *
| 20.333333
| 37
| 0.836066
| 8
| 61
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 61
| 2
| 38
| 30.5
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
545a6f63d456bb39dba39b05f55ec7c3640465e5
| 25,838
|
py
|
Python
|
jinstall/centos7/tools/SyNginx.py
|
a07061625/swooleyaf_install
|
c6ed2b1db6c1dba71558e9e19a2a9678fe536bfa
|
[
"BSD-2-Clause"
] | 5
|
2019-01-16T02:40:39.000Z
|
2021-05-10T08:59:37.000Z
|
jinstall/centos7/tools/SyNginx.py
|
a07061625/swooleyaf_install
|
c6ed2b1db6c1dba71558e9e19a2a9678fe536bfa
|
[
"BSD-2-Clause"
] | null | null | null |
jinstall/centos7/tools/SyNginx.py
|
a07061625/swooleyaf_install
|
c6ed2b1db6c1dba71558e9e19a2a9678fe536bfa
|
[
"BSD-2-Clause"
] | 2
|
2018-09-20T14:28:05.000Z
|
2022-03-31T07:54:59.000Z
|
# -*- coding:utf-8 -*-
from jinstall.centos7.utils.Tool import *
class SyNginx:
@staticmethod
def install_openresty(params: dict):
"""安装openresty"""
Tool.check_local_files([
'resources/linux/zlib-1.2.11.tar.gz',
'resources/nginx/libunwind-1.1.tar.gz',
'resources/nginx/gperftools-2.1.tar.gz',
'resources/nginx/lualib.zip',
'resources/nginx/data/geoip2.tar.gz',
'resources/nginx/modules/module_brotli.tar.gz',
'resources/nginx/modules/module_cache_purge_2.3.tar.gz',
'resources/nginx/modules/module_http_flv.zip',
'resources/nginx/modules/module_iconv.zip',
'resources/nginx/modules/module_lua_kong_0.0.6.tar.gz',
'resources/nginx/modules/module_vts_0.1.18.tar.gz',
'resources/nginx/modules/module_naxsi_1.1a.zip',
'resources/nginx/modules/module_ct_1.3.2.zip',
'resources/nginx/modules/module_pagespeed_1.13.35.2.zip',
'resources/nginx/modules/module_geoip2_3.3.tar.gz',
'resources/nginx/modules/module_substitutions_filter_0.6.4.tar.gz',
'resources/nginx/openresty-1.15.8.3.tar.gz',
'configs/swooleyaf/nginx/context_http/conf.server',
'configs/swooleyaf/nginx/context_http/default.conf',
'configs/swooleyaf/nginx/context_http/vts.conf',
'configs/swooleyaf/nginx/context_http/api.conf_demo',
'configs/swooleyaf/nginx/context_http/api.server',
'configs/swooleyaf/nginx/context_http/api.upstream',
'configs/swooleyaf/nginx/context_http/api_static.conf_demo',
'configs/swooleyaf/nginx/context_http/front.conf_demo',
'configs/swooleyaf/nginx/context_http/front.server',
'configs/swooleyaf/nginx/context_http/front.upstream',
'configs/swooleyaf/nginx/context_http/front_static.conf_demo',
'configs/swooleyaf/nginx/context_http/rtmp.conf_demo',
'configs/swooleyaf/nginx/context_http/rtmp_api.conf_demo',
'configs/swooleyaf/nginx/context_http/naxsi_core.rules',
'configs/swooleyaf/nginx/context_http/rtmp_stat.conf',
'configs/swooleyaf/nginx/context_http/locations/domain_common.location',
'configs/swooleyaf/nginx/context_http/locations/domain_outer_api.location',
'configs/swooleyaf/nginx/context_http/locations/domain_outer_common.location',
'configs/swooleyaf/nginx/context_http/locations/domain_outer_front.location',
'configs/swooleyaf/nginx/context_http/locations/domain_static_base.location',
'configs/swooleyaf/nginx/context_http/locations/domain_static_common.location',
'configs/swooleyaf/nginx/context_http/locations/https_cert.location',
'configs/swooleyaf/nginx/context_http/locations/mirror_monitor.location',
'configs/swooleyaf/nginx/context_http/locations/naxsi_config.location',
'configs/swooleyaf/nginx/context_http/locations/naxsi_forbidden.location',
'configs/swooleyaf/nginx/context_http/locations/pagespeed_admin.location',
'configs/swooleyaf/nginx/context_http/locations/pagespeed_admin_global.location',
'configs/swooleyaf/nginx/context_http/locations/pagespeed_common.location',
'configs/swooleyaf/nginx/context_http/locations/pagespeed_server.location',
'configs/swooleyaf/nginx/context_http/locations/proxy_api_common.location',
'configs/swooleyaf/nginx/context_http/locations/proxy_api_http.location',
'configs/swooleyaf/nginx/context_http/locations/proxy_api_websocket.location',
'configs/swooleyaf/nginx/context_http/locations/proxy_common.location',
'configs/swooleyaf/nginx/context_http/locations/proxy_static.location',
'configs/swooleyaf/nginx/context_http/locations/server_register.location',
'configs/swooleyaf/nginx/context_stream/conf.server',
'configs/swooleyaf/nginx/context_stream/proxy_rpc.server',
'configs/swooleyaf/nginx/context_stream/a01_order.conf_demo',
'configs/swooleyaf/nginx/context_stream/a01_services.conf_demo',
'configs/swooleyaf/nginx/context_stream/a01_user.conf_demo',
'configs/swooleyaf/nginx/context_stream/a01_api.conf_demo',
'configs/swooleyaf/nginx/context_stream/a01_content.conf_demo',
'configs/swooleyaf/nginx/context_rtmp/tv.conf_demo',
'configs/swooleyaf/nginx/certs/dhparam.pem',
'configs/swooleyaf/nginx/certs/fake.crt',
'configs/swooleyaf/nginx/certs/fake.key',
'configs/swooleyaf/nginx/certs/tls_session_ticket.key',
'configs/swooleyaf/nginx/passwd/pagespeed',
'configs/swooleyaf/nginx/nginx.conf',
'configs/swooleyaf/nginx/nginx.service',
])
run('mkdir %s && mkdir %s/pagespeed' % (install_configs['openresty.path.log'], install_configs['openresty.path.log']))
run('mkdir %s && mkdir %s/certs && mkdir %s/scts && mkdir %s/modules && mkdir %s/passwd' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'], install_configs['openresty.path.configs'], install_configs['openresty.path.configs'], install_configs['openresty.path.configs']))
run('mkdir %s/context_http && mkdir %s/context_http/locations && mkdir %s/context_rtmp && mkdir %s/context_stream' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'], install_configs['openresty.path.configs'], install_configs['openresty.path.configs']))
run('mkdir %s/cache && mkdir %s/cache/pagespeed' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs']))
run('mkdir %s/temp && mkdir %s/temp/pagespeed' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs']))
run('mkdir %s/data && mkdir %s/data/geoip2' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs']))
run('yum -y install gd-devel')
Tool.upload_file_fabric({
'/resources/nginx/libunwind-1.1.tar.gz': 'remote/libunwind-1.1.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf libunwind-1.1.tar.gz')
run('cd libunwind-1.1/ && CFLAGS=-fPIC ./configure --prefix=/usr && make CFLAGS=-fPIC && make CFLAGS=-fPIC install')
run('rm -rf libunwind-1.1/ && rm -rf libunwind-1.1.tar.gz')
Tool.upload_file_fabric({
'/resources/nginx/gperftools-2.1.tar.gz': 'remote/gperftools-2.1.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf gperftools-2.1.tar.gz')
run('cd gperftools-2.1/ && ./configure --prefix=/usr --enable-frame-pointers && make && make install && ldconfig')
run('rm -rf gperftools-2.1/ && rm -rf gperftools-2.1.tar.gz')
run('mkdir /tmp/tcmalloc && chmod 0777 /tmp/tcmalloc')
Tool.upload_file_fabric({
'/resources/nginx/lualib.zip': 'remote/lualib.zip',
})
with cd(install_configs['path.package.remote']):
run('unzip -q lualib.zip')
run('mv lualib/ %s/lualib' % install_configs['openresty.path.configs'])
run('rm -rf lualib.zip')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_cache_purge_2.3.tar.gz': 'remote/module_cache_purge_2.3.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf module_cache_purge_2.3.tar.gz')
run('mv module_cache_purge_2.3/ %s/modules/cache_purge' % install_configs['openresty.path.configs'])
run('rm -rf module_cache_purge_2.3.tar.gz')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_brotli.tar.gz': 'remote/module_brotli.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf module_brotli.tar.gz')
run('mv module_brotli/ %s/modules/brotli' % install_configs['openresty.path.configs'])
run('rm -rf module_brotli.tar.gz')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_http_flv.zip': 'remote/module_http_flv.zip',
})
with cd(install_configs['path.package.remote']):
run('unzip -q module_http_flv.zip')
run('mv module_http_flv/ %s/modules/http_flv' % install_configs['openresty.path.configs'])
run('rm -rf module_http_flv.zip')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_iconv.zip': 'remote/module_iconv.zip',
})
with cd(install_configs['path.package.remote']):
run('unzip -q module_iconv.zip')
run('mv module_iconv/ %s/modules/iconv' % install_configs['openresty.path.configs'])
run('rm -rf module_iconv.zip')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_lua_kong_0.0.6.tar.gz': 'remote/module_lua_kong_0.0.6.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf module_lua_kong_0.0.6.tar.gz')
run('mv module_lua_kong_0.0.6/ %s/modules/lua_kong' % install_configs['openresty.path.configs'])
run('rm -rf module_lua_kong_0.0.6.tar.gz')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_vts_0.1.18.tar.gz': 'remote/module_vts_0.1.18.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf module_vts_0.1.18.tar.gz')
run('mv module_vts_0.1.18/ %s/modules/vts' % install_configs['openresty.path.configs'])
run('rm -rf module_vts_0.1.18.tar.gz')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_naxsi_1.1a.zip': 'remote/module_naxsi_1.1a.zip',
})
with cd(install_configs['path.package.remote']):
run('unzip -q module_naxsi_1.1a.zip')
run('mv module_naxsi_1.1a/ %s/modules/naxsi' % install_configs['openresty.path.configs'])
run('rm -rf module_naxsi_1.1a.zip')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_ct_1.3.2.zip': 'remote/module_ct_1.3.2.zip',
})
with cd(install_configs['path.package.remote']):
run('unzip -q module_ct_1.3.2.zip')
run('mv module_ct_1.3.2/ %s/modules/ct' % install_configs['openresty.path.configs'])
run('rm -rf module_ct_1.3.2.zip')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_pagespeed_1.13.35.2.zip': 'remote/module_pagespeed_1.13.35.2.zip',
})
with cd(install_configs['path.package.remote']):
run('unzip -q module_pagespeed_1.13.35.2.zip')
run('mv module_pagespeed_1.13.35.2/ %s/modules/pagespeed' % install_configs['openresty.path.configs'])
run('chmod a+x %s/modules/pagespeed/scripts/*.sh' % install_configs['openresty.path.configs'])
run('rm -rf module_pagespeed_1.13.35.2.zip')
Tool.upload_file_fabric({
'/resources/nginx/data/geoip2.tar.gz': 'remote/geoip2.tar.gz',
'/resources/nginx/modules/module_geoip2_3.3.tar.gz': 'remote/module_geoip2_3.3.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf geoip2.tar.gz')
run('mv city.mmdb %s/data/geoip2/ && mv country.mmdb %s/data/geoip2/' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs']))
run('tar -zxf module_geoip2_3.3.tar.gz')
run('mv module_geoip2_3.3/ %s/modules/geoip2' % install_configs['openresty.path.configs'])
run('rm -rf geoip2.tar.gz && rm -rf module_geoip2_3.3.tar.gz')
Tool.upload_file_fabric({
'/resources/nginx/modules/module_substitutions_filter_0.6.4.tar.gz': 'remote/module_substitutions_filter_0.6.4.tar.gz',
})
with cd(install_configs['path.package.remote']):
run('tar -zxf module_substitutions_filter_0.6.4.tar.gz')
run('mv module_substitutions_filter_0.6.4/ %s/modules/substitutions_filter' % install_configs['openresty.path.configs'])
run('rm -rf module_substitutions_filter_0.6.4.tar.gz')
Tool.upload_file_fabric({
'/resources/linux/zlib-1.2.11.tar.gz': 'remote/zlib-1.2.11.tar.gz',
'/resources/nginx/openresty-1.15.8.3.tar.gz': 'remote/openresty-1.15.8.3.tar.gz',
})
openresty_dir = '/usr/local/openresty'
with cd(install_configs['path.package.remote']):
zlib_dir_remote = ''.join([install_configs['path.package.remote'], '/zlib-1.2.11'])
pcre_include = '/usr/local/pcre/include'
pcre_lib = '/usr/local/pcre/lib'
openssl_include = '/usr/local/openssl/include'
openssl_lib = '/usr/local/openssl/lib'
run('mkdir %s' % openresty_dir)
run('tar -zxf openresty-1.15.8.3.tar.gz')
run('tar -zxf zlib-1.2.11.tar.gz')
ngx_conf_start = './configure --prefix=%s' % openresty_dir
ngx_conf_custom1 = '--with-cc-opt="-I%s -I%s" --with-ld-opt="-L%s -L%s -Wl,-rpath,%s:%s"' % (pcre_include, openssl_include, pcre_lib, openssl_lib, pcre_lib, openssl_lib)
ngx_conf_custom2 = '--with-zlib=%s --with-openssl-opt="enable-tls1_3 enable-weak-ssl-ciphers" --with-luajit --with-luajit-xcflags="-DLUAJIT_NUMMODE=2" --with-pcre-jit' % zlib_dir_remote
ngx_conf_custom3 = '--with-debug --with-threads --with-file-aio --with-google_perftools_module'
ngx_conf_without = '--without-http_autoindex_module --without-http_ssi_module'
ngx_conf_http1 = '--with-http_ssl_module --with-http_realip_module --with-http_stub_status_module --with-http_sub_module'
ngx_conf_http2 = '--with-http_v2_module --with-http_gzip_static_module --with-http_image_filter_module --with-http_addition_module'
ngx_conf_stream = '--with-stream --with-stream_realip_module --with-stream_ssl_module --with-stream_ssl_preread_module'
ngx_conf_modules1 = '--add-module=%s/modules/cache_purge --add-module=%s/modules/lua_kong' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'])
ngx_conf_modules2 = '--add-module=%s/modules/vts --add-module=%s/modules/http_flv' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'])
ngx_conf_modules3 = '--add-module=%s/modules/brotli --add-module=%s/modules/iconv' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'])
ngx_conf_modules4 = '--add-module=%s/modules/naxsi/naxsi_src --add-module=%s/modules/ct' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'])
ngx_conf_modules5 = '--add-module=%s/modules/pagespeed --add-module=%s/modules/geoip2' % (install_configs['openresty.path.configs'], install_configs['openresty.path.configs'])
ngx_conf_modules6 = '--add-module=%s/modules/substitutions_filter' % (install_configs['openresty.path.configs'])
ngx_conf = ' '.join([
ngx_conf_start,
ngx_conf_custom1,
ngx_conf_custom2,
ngx_conf_custom3,
ngx_conf_without,
ngx_conf_http1,
ngx_conf_http2,
ngx_conf_stream,
ngx_conf_modules1,
ngx_conf_modules2,
ngx_conf_modules3,
ngx_conf_modules4,
ngx_conf_modules5,
ngx_conf_modules6,
])
# 中间会弹出关于PSOL的选择,选Y即可
run('cd openresty-1.15.8.3/ && %s && gmake && gmake install' % ngx_conf)
run('rm -rf openresty-1.15.8.3/ && rm -rf openresty-1.15.8.3.tar.gz')
run('rm -rf zlib-1.2.11/ && rm -rf zlib-1.2.11.tar.gz')
conf_remote_nginx = ''.join([openresty_dir, '/nginx/conf/nginx.conf'])
run('rm -rf %s' % conf_remote_nginx)
service_remote_nginx = '/lib/systemd/system/nginx.service'
Tool.upload_file_fabric({
'/configs/swooleyaf/nginx/context_http/conf.server': ''.join([install_configs['openresty.path.configs'], '/context_http/conf.server']),
'/configs/swooleyaf/nginx/context_http/default.conf': ''.join([install_configs['openresty.path.configs'], '/context_http/default.conf']),
'/configs/swooleyaf/nginx/context_http/vts.conf': ''.join([install_configs['openresty.path.configs'], '/context_http/vts.conf']),
'/configs/swooleyaf/nginx/context_http/api.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_http/api.conf_demo']),
'/configs/swooleyaf/nginx/context_http/api.server': ''.join([install_configs['openresty.path.configs'], '/context_http/api.server']),
'/configs/swooleyaf/nginx/context_http/api.upstream': ''.join([install_configs['openresty.path.configs'], '/context_http/api.upstream']),
'/configs/swooleyaf/nginx/context_http/api_static.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_http/api_static.conf_demo']),
'/configs/swooleyaf/nginx/context_http/front.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_http/front.conf_demo']),
'/configs/swooleyaf/nginx/context_http/front.server': ''.join([install_configs['openresty.path.configs'], '/context_http/front.server']),
'/configs/swooleyaf/nginx/context_http/front.upstream': ''.join([install_configs['openresty.path.configs'], '/context_http/front.upstream']),
'/configs/swooleyaf/nginx/context_http/front_static.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_http/front_static.conf_demo']),
'/configs/swooleyaf/nginx/context_http/rtmp.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_http/rtmp.conf_demo']),
'/configs/swooleyaf/nginx/context_http/rtmp_api.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_http/rtmp_api.conf_demo']),
'/configs/swooleyaf/nginx/context_http/rtmp_stat.conf': ''.join([install_configs['openresty.path.configs'], '/context_http/rtmp_stat.conf']),
'/configs/swooleyaf/nginx/context_http/naxsi_core.rules': ''.join([install_configs['openresty.path.configs'], '/context_http/naxsi_core.rules']),
'/configs/swooleyaf/nginx/context_http/locations/domain_common.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/domain_common.location']),
'/configs/swooleyaf/nginx/context_http/locations/domain_outer_api.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/domain_outer_api.location']),
'/configs/swooleyaf/nginx/context_http/locations/domain_outer_common.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/domain_outer_common.location']),
'/configs/swooleyaf/nginx/context_http/locations/domain_outer_front.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/domain_outer_front.location']),
'/configs/swooleyaf/nginx/context_http/locations/domain_static_base.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/domain_static_base.location']),
'/configs/swooleyaf/nginx/context_http/locations/domain_static_common.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/domain_static_common.location']),
'/configs/swooleyaf/nginx/context_http/locations/https_cert.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/https_cert.location']),
'/configs/swooleyaf/nginx/context_http/locations/mirror_monitor.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/mirror_monitor.location']),
'/configs/swooleyaf/nginx/context_http/locations/naxsi_config.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/naxsi_config.location']),
'/configs/swooleyaf/nginx/context_http/locations/naxsi_forbidden.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/naxsi_forbidden.location']),
'/configs/swooleyaf/nginx/context_http/locations/pagespeed_admin.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/pagespeed_admin.location']),
'/configs/swooleyaf/nginx/context_http/locations/pagespeed_admin_global.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/pagespeed_admin_global.location']),
'/configs/swooleyaf/nginx/context_http/locations/pagespeed_common.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/pagespeed_common.location']),
'/configs/swooleyaf/nginx/context_http/locations/pagespeed_server.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/pagespeed_server.location']),
'/configs/swooleyaf/nginx/context_http/locations/proxy_api_common.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/proxy_api_common.location']),
'/configs/swooleyaf/nginx/context_http/locations/proxy_api_http.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/proxy_api_http.location']),
'/configs/swooleyaf/nginx/context_http/locations/proxy_api_websocket.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/proxy_api_websocket.location']),
'/configs/swooleyaf/nginx/context_http/locations/proxy_common.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/proxy_common.location']),
'/configs/swooleyaf/nginx/context_http/locations/proxy_static.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/proxy_static.location']),
'/configs/swooleyaf/nginx/context_http/locations/server_register.location': ''.join([install_configs['openresty.path.configs'], '/context_http/locations/server_register.location']),
'/configs/swooleyaf/nginx/context_stream/conf.server': ''.join([install_configs['openresty.path.configs'], '/context_stream/conf.server']),
'/configs/swooleyaf/nginx/context_stream/proxy_rpc.server': ''.join([install_configs['openresty.path.configs'], '/context_stream/proxy_rpc.server']),
'/configs/swooleyaf/nginx/context_stream/a01_order.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_stream/a01_order.conf_demo']),
'/configs/swooleyaf/nginx/context_stream/a01_services.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_stream/a01_services.conf_demo']),
'/configs/swooleyaf/nginx/context_stream/a01_user.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_stream/a01_user.conf_demo']),
'/configs/swooleyaf/nginx/context_stream/a01_api.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_stream/a01_api.conf_demo']),
'/configs/swooleyaf/nginx/context_stream/a01_content.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_stream/a01_content.conf_demo']),
'/configs/swooleyaf/nginx/context_rtmp/tv.conf_demo': ''.join([install_configs['openresty.path.configs'], '/context_rtmp/tv.conf_demo']),
'/configs/swooleyaf/nginx/certs/dhparam.pem': ''.join([install_configs['openresty.path.configs'], '/certs/dhparam.pem']),
'/configs/swooleyaf/nginx/certs/fake.crt': ''.join([install_configs['openresty.path.configs'], '/certs/fake.crt']),
'/configs/swooleyaf/nginx/certs/fake.key': ''.join([install_configs['openresty.path.configs'], '/certs/fake.key']),
'/configs/swooleyaf/nginx/certs/tls_session_ticket.key': ''.join([install_configs['openresty.path.configs'], '/certs/tls_session_ticket.key']),
'/configs/swooleyaf/nginx/passwd/pagespeed': ''.join([install_configs['openresty.path.configs'], '/passwd/pagespeed']),
'/configs/swooleyaf/nginx/nginx.conf': conf_remote_nginx,
'/configs/swooleyaf/nginx/nginx.service': service_remote_nginx,
})
run('chmod 754 %s' % service_remote_nginx)
run('systemctl enable nginx')
@staticmethod
def install_kong(params: dict):
    """Install Kong (rpm install recommended; source-install start-up errors remain unresolved)."""
    Tool.check_local_files([
        'configs/swooleyaf/nginx/kong/kong',
        'configs/swooleyaf/nginx/kong/kong.conf',
        'configs/swooleyaf/nginx/kong/tls.lua',
    ])
    with cd(install_configs['path.package.remote']):
        # Create the log, install, bin, config and lua-resty directories in one shot.
        kong_dirs = [
            '/home/logs/kong',
            '/usr/local/kong',
            '/usr/local/kong/bin',
            '/etc/kong',
            '%s/share/lua/5.1/resty/kong' % install_configs['luarocks.path.install'],
        ]
        run(' && '.join(['mkdir ' + directory for directory in kong_dirs]))
        # tls.lua ships in the lualib directory of the lua-kong-module extension.
        kong_binary = '/usr/local/kong/bin/kong'
        Tool.upload_file_fabric({
            '/configs/swooleyaf/nginx/kong/kong': kong_binary,
            '/configs/swooleyaf/nginx/kong/kong.conf': '/etc/kong/kong.conf',
            '/configs/swooleyaf/nginx/kong/tls.lua': '%s/share/lua/5.1/resty/kong/tls.lua' % install_configs['luarocks.path.install'],
        })
        run('chmod a+x %s' % kong_binary)
| 77.825301
| 315
| 0.677878
| 3,232
| 25,838
| 5.197401
| 0.071473
| 0.091678
| 0.132516
| 0.146267
| 0.838671
| 0.804203
| 0.769615
| 0.729551
| 0.685855
| 0.591201
| 0
| 0.015134
| 0.166305
| 25,838
| 331
| 316
| 78.060423
| 0.764681
| 0.004838
| 0
| 0.188119
| 0
| 0.042904
| 0.627121
| 0.526304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006601
| false
| 0.009901
| 0.0033
| 0
| 0.013201
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5463f84e3b94532a28f0d0cb7d1752b51ce7281b
| 37
|
py
|
Python
|
pydp/distributions/__init__.py
|
BrendanSchell/PyDP
|
e56548c1b024bce9ed3ffd09407d177d1029c238
|
[
"Apache-2.0"
] | null | null | null |
pydp/distributions/__init__.py
|
BrendanSchell/PyDP
|
e56548c1b024bce9ed3ffd09407d177d1029c238
|
[
"Apache-2.0"
] | null | null | null |
pydp/distributions/__init__.py
|
BrendanSchell/PyDP
|
e56548c1b024bce9ed3ffd09407d177d1029c238
|
[
"Apache-2.0"
] | null | null | null |
from .._pydp._distributions import *
| 18.5
| 36
| 0.783784
| 4
| 37
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
547e1a6f5fc0134f887b0268514a831331ee4c5d
| 27
|
py
|
Python
|
indra_network_search/pathfinding/__init__.py
|
kkaris/indra_network_service
|
4209f1c3ea010fd543bb2fc73905e9146e9a78fe
|
[
"BSD-2-Clause"
] | null | null | null |
indra_network_search/pathfinding/__init__.py
|
kkaris/indra_network_service
|
4209f1c3ea010fd543bb2fc73905e9146e9a78fe
|
[
"BSD-2-Clause"
] | 13
|
2021-08-17T13:43:54.000Z
|
2022-03-06T02:05:26.000Z
|
indra_network_search/pathfinding/__init__.py
|
kkaris/indra_network_service
|
4209f1c3ea010fd543bb2fc73905e9146e9a78fe
|
[
"BSD-2-Clause"
] | null | null | null |
from .pathfinding import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5496911a48bffdf4d6238d7be1dea0e93d24b6e8
| 22
|
py
|
Python
|
utils/misc/__init__.py
|
RSk141/exchange_bot
|
e84ca22c9421e984acd0f88df544ad4c7b025edc
|
[
"MIT"
] | 21
|
2018-08-10T16:45:21.000Z
|
2022-01-25T13:04:07.000Z
|
utils/misc/__init__.py
|
RSk141/exchange_bot
|
e84ca22c9421e984acd0f88df544ad4c7b025edc
|
[
"MIT"
] | 6
|
2018-07-18T15:34:32.000Z
|
2021-02-02T21:59:04.000Z
|
staticpy/common/__init__.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | 10
|
2018-10-24T22:14:10.000Z
|
2022-02-08T17:21:47.000Z
|
from . import logging
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49bc71e26ab1dc1d8e97cd570b678137e87879de
| 34
|
py
|
Python
|
venv/Lib/site-packages/Database/es/__init__.py
|
jhonniel/Queuing-python
|
1b117dc7e4b3274b2f8fe72cce4beea363f563ef
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/Database/es/__init__.py
|
jhonniel/Queuing-python
|
1b117dc7e4b3274b2f8fe72cce4beea363f563ef
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/Database/es/__init__.py
|
jhonniel/Queuing-python
|
1b117dc7e4b3274b2f8fe72cce4beea363f563ef
|
[
"MIT"
] | null | null | null |
from ElasticSearchClient import *
| 17
| 33
| 0.852941
| 3
| 34
| 9.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49d7b000f2a9dae3d153473d4d98b45ba1bf55be
| 97
|
py
|
Python
|
project-euler/0015_lattice_paths.py
|
alenic/comprosol
|
101d43ea7fef5e1847842420ab08e481c82bc526
|
[
"MIT"
] | null | null | null |
project-euler/0015_lattice_paths.py
|
alenic/comprosol
|
101d43ea7fef5e1847842420ab08e481c82bc526
|
[
"MIT"
] | null | null | null |
project-euler/0015_lattice_paths.py
|
alenic/comprosol
|
101d43ea7fef5e1847842420ab08e481c82bc526
|
[
"MIT"
] | null | null | null |
import math
def count(n):
    """Return the number of monotone lattice paths through an n x n grid.

    This is the central binomial coefficient C(2n, n) = (2n)! / (n!)^2.
    """
    total_steps = math.factorial(2 * n)
    one_side = math.factorial(n)
    return total_steps // (one_side * one_side)
print(count(20))
| 16.166667
| 52
| 0.690722
| 17
| 97
| 3.941176
| 0.588235
| 0.38806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047059
| 0.123711
| 97
| 6
| 53
| 16.166667
| 0.741176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b71c0e4e91e6f8e14e124dcb604415429ed23beb
| 49
|
py
|
Python
|
armstrong/core/arm_layout/tests/__init__.py
|
cirlabs/armstrong.core.arm_layout
|
fc0bfe5666b287f38bd48ce910abc1bfe82c353c
|
[
"Apache-2.0"
] | null | null | null |
armstrong/core/arm_layout/tests/__init__.py
|
cirlabs/armstrong.core.arm_layout
|
fc0bfe5666b287f38bd48ce910abc1bfe82c353c
|
[
"Apache-2.0"
] | null | null | null |
armstrong/core/arm_layout/tests/__init__.py
|
cirlabs/armstrong.core.arm_layout
|
fc0bfe5666b287f38bd48ce910abc1bfe82c353c
|
[
"Apache-2.0"
] | null | null | null |
from .templatetags import *
from .utils import *
| 16.333333
| 27
| 0.755102
| 6
| 49
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 2
| 28
| 24.5
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b749e5a34a60cf8224366df25719708c641664d8
| 20,628
|
py
|
Python
|
spreedly/pyspreedly/api.py
|
guitarparty/django-spreedly
|
532e594d5163bf177141ce1e6a4b349c714e4e6d
|
[
"MIT"
] | null | null | null |
spreedly/pyspreedly/api.py
|
guitarparty/django-spreedly
|
532e594d5163bf177141ce1e6a4b349c714e4e6d
|
[
"MIT"
] | null | null | null |
spreedly/pyspreedly/api.py
|
guitarparty/django-spreedly
|
532e594d5163bf177141ce1e6a4b349c714e4e6d
|
[
"MIT"
] | null | null | null |
import httplib, urllib2
from datetime import datetime
from decimal import Decimal
from xml.etree.ElementTree import fromstring
from base64 import b64encode
API_VERSION = 'v4'
class Client:
def __init__(self, token, site_name):
self.auth = b64encode('%s:x' % token)
self.base_host = 'subs.pinpayments.com'
self.base_path = '/api/%s/%s' % (API_VERSION, site_name)
self.base_url = 'https://%s%s' % (self.base_host, self.base_path)
self.url = None
def get_response(self):
return self.response
def get_url(self):
return self.url
def set_url(self, url):
self.url = '%s/%s' % (self.base_url, url)
def query(self, data=None):
req = urllib2.Request(url=self.get_url())
req.add_header('User-agent', 'python-spreedly 1.0')
req.add_header('Authorization', 'Basic %s' % self.auth)
# Convert to POST if we got some data
if data:
req.add_header('Content-Type', 'application/xml')
req.add_data(data)
f = urllib2.urlopen(req)
self.response = f.read()
def get_plans(self):
self.set_url('subscription_plans.xml')
self.query()
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscription-plan'):
data = {
'name': plan.findtext('name'),
'description': plan.findtext('description'),
'terms': plan.findtext('terms'),
'plan_type': plan.findtext('plan-type'),
'price': Decimal(plan.findtext('price')),
'enabled': True if plan.findtext('enabled') == 'true' else False,
'force_recurring': \
True if plan.findtext('force-recurring') == 'true' else False,
'force_renew': \
True if plan.findtext('needs-to-be-renewed') == 'true' else False,
'duration': int(plan.findtext('duration-quantity')),
'duration_units': plan.findtext('duration-units'),
'feature_level': plan.findtext('feature-level'),
'return_url': plan.findtext('return-url'),
'version': int(plan.findtext('version')) \
if plan.findtext('version') else 0,
'speedly_id': int(plan.findtext('id')),
'speedly_site_id': int(plan.findtext('site-id')) \
if plan.findtext('site-id') else 0,
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'currency_code': plan.findtext('currency-code'),
}
result.append(data)
return result
def create_subscriber(self, customer_id, screen_name):
'''
Creates a subscription
'''
data = '''
<subscriber>
<customer-id>%d</customer-id>
<screen-name>%s</screen-name>
</subscriber>
''' % (customer_id, screen_name)
self.set_url('subscribers.xml')
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def delete_subscriber(self, id):
if 'test' in self.base_path:
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'DELETE', '%s/subscribers/%d.xml' % (self.base_path, id),
'',
headers
)
response = conn.getresponse()
return response.status
return
def change_subscription(self, subscriber_id, plan_id):
'''
Subscribe a user to some plan
'''
data = '<subscription_plan><id>%d</id></subscription_plan>' % plan_id
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'PUT', '%s/subscribers/%d/change_subscription_plan.xml' % (self.base_path, subscriber_id),
data,
headers
)
response = conn.getresponse()
return response.status
def subscribe(self, subscriber_id, plan_id, trial=False):
'''
Subscribe a user to some plan
'''
data = '<subscription_plan><id>%d</id></subscription_plan>' % plan_id
if trial:
self.set_url('subscribers/%d/subscribe_to_free_trial.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def complimentary_subscription(self, subscriber_id, duration_quantity, duration_units, feature_level):
'''
Creates a complimentary subscription for the specified feature level
'''
data = '''
<complimentary_subscription>
<duration_quantity>%s</duration_quantity>
<duration_units>%s</duration_units>
<feature_level>%s</feature_level>
</complimentary_subscription>
''' % (duration_quantity, duration_units, feature_level)
self.set_url('subscribers/%d/complimentary_subscriptions.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def lifetime_complimentary_subscription(self, subscriber_id, feature_level):
'''
Creates a lifetime complimentary subscription for the specified feature level
'''
data = '''
<lifetime_complimentary_subscription>
<feature_level>%s</feature_level>
</lifetime_complimentary_subscription>
''' % feature_level
self.set_url('subscribers/%d/lifetime_complimentary_subscriptions.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def complimentary_time_extension(self, subscriber_id, duration_quantity, duration_units):
'''
Creates a complimentary time extension
'''
data = '''
<complimentary_time_extension>
<duration_quantity>%s</duration_quantity>
<duration_units>%s</duration_units>
</complimentary_time_extension>
''' % (duration_quantity, duration_units)
self.set_url('subscribers/%d/complimentary_time_extensions.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def add_store_credit(self, subscriber_id, amount):
'''
Adds store credit to a users subscription account
'''
data = '''
<credit>
<amount>%f</amount>
</credit>
''' % amount
self.set_url('subscribers/%d/credits.xml' % subscriber_id)
self.query(data)
return self.get_response()
def cleanup(self):
'''
Removes ALL subscribers. NEVER USE IN PRODUCTION!
'''
if 'test' in self.base_path:
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'DELETE', '%s/subscribers.xml' % self.base_path,
'',
headers
)
response = conn.getresponse()
return response.status
return
def get_info(self, subscriber_id):
self.set_url('subscribers/%d.xml' % subscriber_id)
self.query('')
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def stop_auto_renew(self, subscriber_id):
self.set_url('subscribers/%d/stop_auto_renew.xml' % subscriber_id)
data = '''
<subscriber>
<customer-id>%d</customer-id>
</subscriber>
''' % (subscriber_id)
self.query(data)
return self.get_response()
def allow_free_trial(self, subscriber_id):
self.set_url('subscribers/%d/allow_free_trial.xml' % subscriber_id)
data = '''
<subscriber>
<customer-id>%d</customer-id>
</subscriber>
''' % (subscriber_id)
self.query(data)
return self.get_response()
def get_or_create_subscriber(self, subscriber_id, screen_name):
try:
return self.get_info(subscriber_id)
except urllib2.HTTPError, e:
if e.code == 404:
return self.create_subscriber(subscriber_id, screen_name)
| 41.50503
| 106
| 0.509793
| 2,042
| 20,628
| 5.020078
| 0.085211
| 0.149839
| 0.072383
| 0.079017
| 0.793288
| 0.748122
| 0.736904
| 0.713199
| 0.691152
| 0.679641
| 0
| 0.001804
| 0.355051
| 20,628
| 496
| 107
| 41.58871
| 0.768716
| 0.003733
| 0
| 0.735577
| 0
| 0
| 0.268147
| 0.09095
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.012019
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b76df48be021cc2f6ba6af949df23b63c04c00e9
| 10,717
|
py
|
Python
|
tests/test_SSLChecker.py
|
Mohib-hub/SSLChecker
|
e4f3357fc9950ca8978878591197e0c8eeaf903e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_SSLChecker.py
|
Mohib-hub/SSLChecker
|
e4f3357fc9950ca8978878591197e0c8eeaf903e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_SSLChecker.py
|
Mohib-hub/SSLChecker
|
e4f3357fc9950ca8978878591197e0c8eeaf903e
|
[
"Apache-2.0"
] | null | null | null |
import json
import azure.functions as func
import SSLChecker.SSLChecker.main as _main
main = _main.main
def test_policy_external_no_violations():
    """A policy scan of a compliant external host reports no violations."""
    # Build a mock HTTP GET request for the function host.
    request = func.HttpRequest(
        method='GET',
        body=None,
        url='/api/',
        route_params={
            'scan': 'policy',
            'view': 'external',
            'target': 'api.metlife.com',
        },
    )
    # Invoke the function and decode its JSON string response.
    payload = json.loads(main(request))
    assert payload["Results"] == 'No Policy Violations'
def test_full_external():
    """A full external scan always returns findings, never the clean marker."""
    # Build a mock HTTP GET request for the function host.
    request = func.HttpRequest(
        method='GET',
        body=None,
        url='/api/',
        route_params={
            'scan': 'full',
            'view': 'external',
            'target': 'github.com',
        },
    )
    # Invoke the function and decode its JSON string response.
    payload = json.loads(main(request))
    assert payload["Results"] != 'No Policy Violations'
def test_policy_external_violations():
    """A policy scan of a non-compliant external host reports violations."""
    # Build a mock HTTP GET request for the function host.
    request = func.HttpRequest(
        method='GET',
        body=None,
        url='/api/',
        route_params={
            'scan': 'policy',
            'view': 'external',
            'target': 'espn.com',
        },
    )
    # Invoke the function and decode its JSON string response.
    payload = json.loads(main(request))
    assert payload["Results"] != 'No Policy Violations'
def test_external_dns_name_not_resolved():
    """A registered domain with no DNS answer yields a 'No Answer' message."""
    # Build a mock HTTP GET request for the function host.
    request = func.HttpRequest(
        method='GET',
        body=None,
        url='/api/',
        route_params={
            'scan': 'policy',
            'view': 'external',
            'target': 'joegatt.com',
        },
    )
    # Invoke the function and decode its JSON string response.
    payload = json.loads(main(request))
    assert 'No Answer for joegatt.com using nameserver ' in payload["Message"]
def test_external_dns_name_not_exist():
# Construct a mock HTTP request
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': 'jeogatt.com'}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Check the output to ensure the DNS name could not resolve
assert "Domain doesn't exist for jeogatt.com" in results["Message"]
def test_external_sslyze_timeout():
# Construct a mock HTTP request
name = 'bbbbbbbbbbbbbbb.com'
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': name}
)
# Call the function
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Check the output to ensure the DNS name could not resolve
assert results["Message"] == f'TCP connection to {name}:443 timed-out'
def test_external_missing_dns_name():
# Construct a mock HTTP request
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': None}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Ensure error handling is working properly
assert results['Error Type'] == 'Missing Parameter(s)'
assert results["Message"] == _main.ERROR_MSG_MISSING_PARAMETERS
def test_bad_dns_view_input():
# Construct a mock HTTP request
view_name = 'badinput'
req = func.HttpRequest(
method='GET',
body=None,
url=f'/api/',
route_params={'scan': 'policy',
'view': view_name,
'target': 'microsoft.com'}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Ensure error handling is working properly
assert results['Error Type'] == f"Invalid View '{view_name}'"
assert results["Message"] == _main.ERROR_MSG_INVALID_VIEW
def test_bad_policy_input():
# Construct a mock HTTP request
policy_type = 'pppppp'
req = func.HttpRequest(
method='GET',
body=None,
url=f'/api/',
route_params={'scan': policy_type,
'view': 'external',
'target': 'microsoft.com'}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Ensure error handling is working properly
assert results["Error Type"] == f"Invalid scanner type '{policy_type}'"
assert results["Message"] == _main.ERROR_MSG_INVALID_SCANNER_TYPE
def test_missing_dns_view():
# Construct a mock HTTP request
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': None,
'target': None}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Ensure error handling is working properly
assert results["Error Type"] == 'Missing Parameter(s)'
assert results["Message"] == _main.ERROR_MSG_MISSING_PARAMETERS
def test_bad_dns_name():
# Construct a mock HTTP request
dns_name = 'bbbbbbbbb'
req = func.HttpRequest(
method='GET',
body=None,
url=f'/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': dns_name}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Ensure error handling is working properly
assert results["Error Type"] == 'Invalid FQDN'
assert ' is not a valid FQDN' in results["Message"]
def test_missing_policy_view_dns_name():
# Construct a mock HTTP request
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': None,
'view': None,
'target': None}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
print(results)
assert results["Error Type"] == 'Missing Parameter(s)'
assert results["Message"] == _main.ERROR_MSG_MISSING_PARAMETERS
def test_external_bad_port():
# Construct a mock HTTP request
dns_name = 'yahoo.com'
port = 'a'
req = func.HttpRequest(
method='GET',
body=None,
url=f'/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': dns_name,
'port': port}
)
# Call the function
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Check the output to ensure the DNS name could not resolve
assert results['Error Type'] == f"Invalid Port '{port}'"
assert results["Message"] == _main.ERROR_MSG_INVALID_PORT
def test_external_port_timeout():
# Construct a mock HTTP request
dns_name = 'yahoo.com'
port = '8443'
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': dns_name,
'port': '8443'}
)
# Call the function
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Check the output to ensure the DNS name could not resolve
assert results['Error Type'] == 'Connection Timeout'
assert results["Message"] == f'TCP connection to {dns_name}:{port} timed-out'
def test_external_port_not_in_range():
# Construct a mock HTTP request
port = '123456'
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': 'espn.com',
'port': port}
)
# Call the function
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Check the output to ensure the DNS name could not resolve
assert results['Error Type'] == f"Invalid Port '{port}'"
assert results["Message"] == _main.ERROR_MSG_INVALID_PORT
def test_query_api():
req = func.HttpRequest(
method='GET',
body=None,
url=f'/api/tls',
params={'target': 'www.google.com', 'nameserver': '8.8.8.8'}
)
resp = main(req)
assert 'Results' in resp
def test_query_api_by_ip():
req = func.HttpRequest(
method='GET',
body=None,
url='/api/tls',
params={'target': '140.82.113.4', 'nameserver': '8.8.8.8'}
)
resp = main(req)
assert 'Results' in resp
def test_query_api_error_handling():
req = func.HttpRequest(
method='GET',
body=None,
url='/api/tls',
params={'nameserver': '8.8.8.8'}
)
resp = main(req)
results = json.loads(resp)
assert results['Error Type'] == "Missing required parameter"
def test_policy_external_by_ip_no_violations():
req = func.HttpRequest(
method='GET',
body=None,
url='/api/',
route_params={'scan': 'policy',
'view': 'external',
'target': '216.163.251.205'}
)
# Call the function.
resp = main(req)
# Convert resp string to dict
results = json.loads(resp)
# Check the output to ensure there are violations
assert results["Results"] == 'No Policy Violations'
| 27.063131
| 81
| 0.543716
| 1,186
| 10,717
| 4.802698
| 0.10371
| 0.057058
| 0.060042
| 0.080056
| 0.86552
| 0.843926
| 0.810744
| 0.780372
| 0.775983
| 0.774754
| 0
| 0.007114
| 0.344219
| 10,717
| 395
| 82
| 27.131646
| 0.803358
| 0.182047
| 0
| 0.670732
| 0
| 0
| 0.179134
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 1
| 0.077236
| false
| 0
| 0.012195
| 0
| 0.089431
| 0.004065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b7e5e4bb7f28fb832dd05ba641a16b202d068708
| 408
|
py
|
Python
|
configuration.py
|
Ichabond/TVDownload
|
d58a0143e208b5300ab19116c02dbd161da5364e
|
[
"MIT"
] | 5
|
2017-01-01T15:47:29.000Z
|
2018-12-22T00:52:48.000Z
|
configuration.py
|
Ichabond/TVDownload
|
d58a0143e208b5300ab19116c02dbd161da5364e
|
[
"MIT"
] | 1
|
2017-01-29T09:21:38.000Z
|
2017-01-29T09:22:58.000Z
|
configuration.py
|
Ichabond/TVDownload
|
d58a0143e208b5300ab19116c02dbd161da5364e
|
[
"MIT"
] | null | null | null |
import json
class Config(object):
def __init__(self):
self.config_dict = {}
def load(self, filename):
with open(filename) as data:
self.config_dict = json.load(data)
def add(self, key, value):
self.config_dict[key] = value
def keys(self):
return self.config_dict.keys()
def __getitem__(self, item):
return self.config_dict[item]
| 19.428571
| 46
| 0.610294
| 53
| 408
| 4.45283
| 0.415094
| 0.211864
| 0.29661
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.279412
| 408
| 20
| 47
| 20.4
| 0.802721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.384615
| false
| 0
| 0.076923
| 0.153846
| 0.692308
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4d0c000927e1244ea2b0d8700159ccb6096b8d36
| 10,231
|
py
|
Python
|
torch_dataset.py
|
aimbrain/vqa-project
|
341122a267293017b55db4f033fbe81445af03ea
|
[
"Apache-2.0"
] | 145
|
2018-06-26T01:28:04.000Z
|
2021-11-21T04:18:05.000Z
|
torch_dataset.py
|
Detsuptwang/vqa-project
|
341122a267293017b55db4f033fbe81445af03ea
|
[
"Apache-2.0"
] | 11
|
2018-06-24T11:16:59.000Z
|
2020-11-15T18:21:39.000Z
|
torch_dataset.py
|
Detsuptwang/vqa-project
|
341122a267293017b55db4f033fbe81445af03ea
|
[
"Apache-2.0"
] | 30
|
2018-06-20T16:20:11.000Z
|
2021-06-01T03:32:59.000Z
|
# Copyright 2018 AimBrain Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import json
import numpy as np
import zarr
import pandas as pd
from torch.utils.data import Dataset
from torch.utils.data import dataloader
try:
import cPickle as pickle
except:
import pickle as pickle
def collate_fn(batch):
# put question lengths in descending order so that we can use packed sequences later
batch.sort(key=lambda x: x[-1], reverse=True)
return dataloader.default_collate(batch)
class VQA_Dataset(Dataset):
def __init__(self, data_dir, emb_dim=300, train=True):
# Set parameters
self.data_dir = data_dir # directory where the data is stored
self.emb_dim = emb_dim # question embedding dimension
self.train = train # train (True) or eval (False) mode
self.seqlen = 14 # maximum question sequence length
# Load training question dictionary
q_dict = pickle.load(
open(os.path.join(data_dir, 'train_q_dict.p'), 'rb'))
self.q_itow = q_dict['itow']
self.q_wtoi = q_dict['wtoi']
self.q_words = len(self.q_itow) + 1
# Load training answer dictionary
a_dict = pickle.load(
open(os.path.join(data_dir, 'train_a_dict.p'), 'rb'))
self.a_itow = a_dict['itow']
self.a_wtoi = a_dict['wtoi']
self.n_answers = len(self.a_itow) + 1
# Load image features and bounding boxes
self.i_feat = zarr.open(os.path.join(
data_dir, 'trainval.zarr'), mode='r')
self.bbox = zarr.open(os.path.join(
data_dir, 'trainval_boxes.zarr'), mode='r')
self.sizes = pd.read_csv(os.path.join(
data_dir, 'trainval_image_size.csv'))
# Load questions
if train:
self.vqa = json.load(
open(os.path.join(data_dir, 'vqa_train_final_3000.json')))
else:
self.vqa = json.load(
open(os.path.join(data_dir, 'vqa_val_final_3000.json')))
self.n_questions = len(self.vqa)
print('Loading done')
self.feat_dim = self.i_feat[list(self.i_feat.keys())[
0]].shape[1] + 4 # + bbox
self.init_pretrained_wemb(emb_dim)
def init_pretrained_wemb(self, emb_dim):
"""
From blog.keras.io
Initialises words embeddings with pre-trained GLOVE embeddings
"""
embeddings_index = {}
f = open(os.path.join(self.data_dir, 'glove.6B.') +
str(emb_dim) + 'd.txt')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype=np.float32)
embeddings_index[word] = coefs
f.close()
embedding_mat = np.zeros((self.q_words, emb_dim), dtype=np.float32)
for word, i in self.q_wtoi.items():
embedding_v = embeddings_index.get(word)
if embedding_v is not None:
embedding_mat[i] = embedding_v
self.pretrained_wemb = embedding_mat
def __len__(self):
return self.n_questions
def __getitem__(self, idx):
# question sample
qlen = len(self.vqa[idx]['question_toked'])
q = [0] * 100
for i, w in enumerate(self.vqa[idx]['question_toked']):
try:
q[i] = self.q_wtoi[w]
except:
q[i] = 0 # validation questions may contain unseen word
# soft label answers
a = np.zeros(self.n_answers, dtype=np.float32)
for w, c in self.vqa[idx]['answers_w_scores']:
try:
a[self.a_wtoi[w]] = c
except:
continue
# number of votes for each answer
n_votes = np.zeros(self.n_answers, dtype=np.float32)
for w, c in self.vqa[idx]['answers']:
try:
n_votes[self.a_wtoi[w]] = c
except:
continue
# id of the question
qid = self.vqa[idx]['question_id']
# image sample
iid = self.vqa[idx]['image_id']
img = self.i_feat[str(iid)]
bboxes = np.asarray(self.bbox[str(iid)])
imsize = self.sizes[str(iid)]
if np.logical_not(np.isfinite(img)).sum() > 0:
raise ValueError
# number of image objects
k = 36
# scale bounding boxes by image dimensions
for i in range(k):
bbox = bboxes[i]
bbox[0] /= imsize[0]
bbox[1] /= imsize[1]
bbox[2] /= imsize[0]
bbox[3] /= imsize[1]
bboxes[i] = bbox
# format variables
q = np.asarray(q)
a = np.asarray(a).reshape(-1)
n_votes = np.asarray(n_votes).reshape(-1)
qid = np.asarray(qid).reshape(-1)
i = np.concatenate([img, bboxes], axis=1)
k = np.asarray(k).reshape(1)
return q, a, n_votes, qid, i, k, qlen
class VQA_Dataset_Test(Dataset):
def __init__(self, data_dir, emb_dim=300, train=True):
self.data_dir = data_dir
self.emb_dim = emb_dim
self.train = train
self.seqlen = 14 # hard set based on paper
q_dict = pickle.load(
open(os.path.join(data_dir, 'train_q_dict.p'), 'rb'))
self.q_itow = q_dict['itow']
self.q_wtoi = q_dict['wtoi']
self.q_words = len(self.q_itow) + 1
a_dict = pickle.load(
open(os.path.join(data_dir, 'train_a_dict.p'), 'rb'))
self.a_itow = a_dict['itow']
self.a_wtoi = a_dict['wtoi']
self.n_answers = len(self.a_itow) + 1
if train:
self.vqa = json.load(open(os.path.join(data_dir, 'vqa_train_final_3000.json'))) + \
json.load(
open(os.path.join(data_dir, 'vqa_val_final_3000.json')))
self.i_feat = zarr.open(os.path.join(
data_dir, 'trainval.zarr'), mode='r')
self.bbox = zarr.open(os.path.join(
data_dir, 'trainval_boxes.zarr'), mode='r')
self.sizes = pd.read_csv(os.path.join(
data_dir, 'trainval_image_size.csv'))
else:
self.vqa = json.load(
open(os.path.join(data_dir, 'vqa_test_toked.json')))
self.i_feat = zarr.open(os.path.join(
data_dir, 'test.zarr'), mode='r')
self.bbox = zarr.open(os.path.join(
data_dir, 'test_boxes.zarr'), mode='r')
self.sizes = pd.read_csv(os.path.join(
data_dir, 'test_image_size.csv'))
self.n_questions = len(self.vqa)
print('Loading done')
self.feat_dim = self.i_feat[list(self.i_feat.keys())[
0]].shape[1] + 4 # + bbox
self.init_pretrained_wemb(emb_dim)
def init_pretrained_wemb(self, emb_dim):
"""From blog.keras.io"""
embeddings_index = {}
f = open(os.path.join(self.data_dir, 'glove.6B.') +
str(emb_dim) + 'd.txt')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype=np.float32)
embeddings_index[word] = coefs
f.close()
embedding_mat = np.zeros((self.q_words, emb_dim), dtype=np.float32)
for word, i in self.q_wtoi.items():
embedding_v = embeddings_index.get(word)
if embedding_v is not None:
embedding_mat[i] = embedding_v
self.pretrained_wemb = embedding_mat
def __len__(self):
return self.n_questions
def __getitem__(self, idx):
# question sample
qlen = len(self.vqa[idx]['question_toked'])
q = [0] * 100
for i, w in enumerate(self.vqa[idx]['question_toked']):
try:
q[i] = self.q_wtoi[w]
except:
q[i] = 0 # validation questions may contain unseen word
# soft label answers
if self.train:
a = np.zeros(self.n_answers, dtype=np.float32)
for w, c in self.vqa[idx]['answers_w_scores']:
try:
a[self.a_wtoi[w]] = c
except:
continue
a = np.asarray(a).reshape(-1)
else:
# return 0's for unknown test set answers
a = 0
# votes
if self.train:
n_votes = np.zeros(self.n_answers, dtype=np.float32)
for w, c in self.vqa[idx]['answers']:
try:
n_votes[self.a_wtoi[w]] = c
except:
continue
n_votes = np.asarray(n_votes).reshape(-1)
else:
# return 0's for unknown test set answers
n_votes = 0
# id of the question
qid = self.vqa[idx]['question_id']
# image sample
iid = self.vqa[idx]['image_id']
img = self.i_feat[str(iid)]
bboxes = np.asarray(self.bbox[str(iid)])
imsize = self.sizes[str(iid)]
if np.logical_not(np.isfinite(img)).sum() > 0:
raise ValueError
# k sample
k = 36
# scale bounding boxes by image dimensions
for i in range(k):
bbox = bboxes[i]
bbox[0] /= imsize[0]
bbox[1] /= imsize[1]
bbox[2] /= imsize[0]
bbox[3] /= imsize[1]
bboxes[i] = bbox
# format
q = np.asarray(q)
qid = np.asarray(qid).reshape(-1)
i = np.concatenate([img, bboxes], axis=1)
k = np.asarray(k).reshape(1)
return q, a, n_votes, qid, i, k, qlen
| 33.003226
| 95
| 0.555566
| 1,390
| 10,231
| 3.931655
| 0.183453
| 0.033303
| 0.036597
| 0.046112
| 0.762855
| 0.741629
| 0.735407
| 0.735407
| 0.724794
| 0.724794
| 0
| 0.016441
| 0.328218
| 10,231
| 309
| 96
| 33.110032
| 0.778699
| 0.1515
| 0
| 0.88785
| 0
| 0
| 0.065394
| 0.016523
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042056
| false
| 0
| 0.046729
| 0.009346
| 0.121495
| 0.014019
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d0cd4b064231a64daad90e8c573db67af8ad29d
| 27,720
|
py
|
Python
|
lobpy/handler.py
|
mm842/lobpy
|
059af586c47ee346302fcf637aa3b05d9872cc2b
|
[
"BSD-3-Clause"
] | 20
|
2018-12-31T20:34:52.000Z
|
2021-11-30T08:07:56.000Z
|
lobpy/handler.py
|
tonylibing/lobpy
|
059af586c47ee346302fcf637aa3b05d9872cc2b
|
[
"BSD-3-Clause"
] | 1
|
2021-04-27T17:51:45.000Z
|
2021-04-27T17:51:45.000Z
|
lobpy/handler.py
|
tonylibing/lobpy
|
059af586c47ee346302fcf637aa3b05d9872cc2b
|
[
"BSD-3-Clause"
] | 15
|
2018-12-14T21:43:36.000Z
|
2022-01-13T05:42:57.000Z
|
"""
Copyright (c) 2018, University of Oxford, Rama Cont and ETH Zurich, Marvin S. Mueller
handler.py
contains some functions to work with the model
"""
import math
import sys
import numpy as np
import pandas as pd
import lobpy.datareader.lobster as lobr
import lobpy.models.calibration as cal
import lobpy.models.estimators as est
import lobpy.models.plots as lobp
import lobpy.models.price as lobprice
# def calibrate_mrevdynamics_lobster_rf_f(
# ticker_str,
# date_str,
# time_start_data,
# time_end_data,
# time_start_calc,
# time_end_calc,
# num_levels_data,
# num_levels_calc,
# timegrid_size,
# ntimesteps_cal,
# ntimesteps_nextcal
# ):
# """ Calibrates mean reverting model to order book volume loaded from lobster data """
# # read files from lobster to uniform grid
# print('Extracting total volume process on uniform grid.')
# dt, time_stamps, volume_bid, volume_ask = lob.load_volume_process(
# ticker_str,
# date_str,
# time_start_data,
# time_end_data,
# time_start_calc,
# time_end_calc,
# num_levels_data,
# num_levels_calc,
# timegrid_size
# )
# print("Finished.")
# print('Start calibration on time frame')
# # Create calibrator object with id inherited from lobster notation and estimator for correlation based on realized covariance
# ov_cal = cal.OrderVolumeCalibrator(
# calibratorid=lob.create_lobster_filename(ticker_str, date_str, str(time_start_calc), str(time_end_calc), "cal_ordervolume-f", str(num_levels_calc)),
# estimator_dynamics=est.estimate_recgamma_diff,
# estimator_corr=est.estimate_log_corr_rv
# )
# ov_cal.calibrate_running_frame(
# time_stamps[0],
# dt,
# volume_bid,
# volume_ask,
# ntimesteps_cal,
# ntimesteps_nextcal
# )
# # save history as csv file
# print('Calibration finished. Saving csv file.')
# ov_cal.savef_history(csv=True)
# print('Calibration history saved.')
# # create plots
# lobp.plot_calibration_history_volume(ov_cal.history, filename=ov_cal.calibratorid, titlestr=" ".join((ticker_str, date_str, str(num_levels_calc))))
# print('Plots saved')
def calibrate_mrevdynamics_lobster_rf(
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
ntimepoints_grid,
ntimesteps_cal,
ntimesteps_nextcal,
cal_to_average=False,
cal_to_average_classic=False,
):
""" Calibrates mean reverting model to order book volume loaded from lobster data
-----------
args:
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
ntimepoints_grid,
ntimesteps_cal,
ntimesteps_nextcal,
cal_to_average=False calibration to total volume (if False) in the first buckets or average
cal_to_average_classic=False calibration to total volume (if False) in the first buckets or average - by just averaging after extraction
"""
# read files from lobster to uniform grid
lobreader = lobr.LOBSTERReader(
ticker_str,
date_str,
str(time_start_data),
str(time_end_data),
str(num_levels_data),
str(time_start_calc),
str(time_end_calc)
)
print('Extracting total volume process on uniform grid.')
if cal_to_average:
dt, time_stamps, volume_bid, volume_ask = lobreader.load_marketdepth(
num_observations=ntimepoints_grid,
num_levels_calc_str=str(num_levels_calc),
write_output=False
)
else:
dt, time_stamps, volume_bid, volume_ask = lobreader.load_ordervolume(
num_observations=ntimepoints_grid,
num_levels_calc_str=str(num_levels_calc),
write_output=False
)
if cal_to_average_classic:
volume_bid = np.true_divide(volume_bid, num_levels_calc)
volume_ask = np.true_divide(volume_ask, num_levels_calc)
print("Finished.")
print('Start calibration on time frame')
# Create calibrator object with id inherited from lobster notation and estimator for correlation based on realized covariance
ov_cal = cal.OrderVolumeCalibrator(
calibratorid=lobreader.create_filestr(identifier_str="cal_ordervolume",
num_levels=str(num_levels_calc)),
estimator_dynamics=est.estimate_recgamma_diff,
estimator_corr=est.estimate_log_corr_rv
)
ov_cal.calibrate_running_frame(
time_stamps[0],
dt,
volume_bid,
volume_ask,
ntimesteps_cal,
ntimesteps_nextcal
)
# save history as csv file
print('Calibration finished. Saving csv file.')
ov_cal.savef_history(csv=True)
print('Calibration history saved.')
# create plots
lobp.plot_calibration_history_volume(ov_cal.history, filename=ov_cal.calibratorid,
titlestr=" ".join(
(ticker_str, date_str, str(num_levels_calc))))
print('Plots saved')
def calibrate_profile_lobster(
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
time_cal
):
""" Splits the day into time intervals of specified size and fits the model profile to the average profile of the order book from lobster data. 4 different fitting methods are used. In addition, the average profile for the whole day will be fitted.
-----------
args:
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
time_cal time for averageing in ms
"""
gamma_bids_LSQ = []
gamma_asks_LSQ = []
gamma_bids_LSQF = []
gamma_asks_LSQF = []
gamma_bids_ArgMax = []
gamma_asks_ArgMax = []
gamma_bids_RMax1 = []
gamma_asks_RMax1 = []
lobreader = lobr.LOBSTERReader(
ticker_str,
date_str,
str(time_start_data),
str(time_end_data),
str(num_levels_data),
str(time_start_calc),
str(time_end_calc)
)
computation_interval = int(time_end_calc) - int(time_start_calc)
num_calibration = computation_interval / int(time_cal)
if num_calibration == 0:
return 0
time_starts = np.arange(int(time_start_calc), int(time_end_calc), int(time_cal))
time_ends = time_starts + int(time_cal)
if time_ends[-1] > time_end_calc:
time_ends[-1] = time_end_calc
for time_start, time_end in zip(time_starts, time_ends):
print("Calibrate")
lobreader.set_timecalc(str(time_start), str(time_end))
filename = lobreader.create_filestr(lobr.AV_ORDERBOOK_FILE_ID, str(num_levels_calc))
av_profile_bid, av_profile_ask = lobr.get_data_from_file(filename)
if (av_profile_bid is None) or (av_profile_ask is None):
av_profile_bid, av_profile_ask = lobreader.average_profile(str(num_levels_calc),
write_outputfile=True)
lobp.plot_av_profile(av_profile_bid, av_profile_ask, filename, ticker_str, date_str,
str(time_start), str(time_end))
tvbid = np.sum(av_profile_bid)
tvask = np.sum(av_profile_ask)
modelLSQ, modelLSQF, modelArgMax, modelRMax1 = cal.fit_profile_to_data(
np.array(av_profile_bid), np.array(av_profile_ask))
modelLSQ.set_modelid(lobreader.create_filestr("Model-LSQ", str(num_levels_calc)))
modelLSQF.set_modelid(lobreader.create_filestr("Model-LSQF", str(num_levels_calc)))
modelArgMax.set_modelid(lobreader.create_filestr("Model-ArgMax", str(num_levels_calc)))
modelRMax1.set_modelid(lobreader.create_filestr("Model-RMax1", str(num_levels_calc)))
models = [modelLSQ, modelLSQF, modelArgMax, modelRMax1]
print("Save model parameters to files")
for model, gamma_bids, gamma_asks in zip(models, (
gamma_bids_LSQ, gamma_bids_LSQF, gamma_bids_ArgMax, gamma_bids_RMax1), (
gamma_asks_LSQ, gamma_asks_LSQF,
gamma_asks_ArgMax,
gamma_asks_RMax1)):
model.savef()
gb, ga = model.get_gamma()
gamma_bids.append(gb)
gamma_asks.append(ga)
print("|--------------------------------------------------------\n|")
print(" Model parameters for modelid %s" % (model.get_modelid()))
print(" gamma_bid: %f, gamma_ask: %f" % (model.get_gamma()))
print(" z0_bid: %f, z0_ask: %f" % (model.get_z0()))
print(" TV_bid: %f, TV_ask: %f" % (tvbid, tvask))
print("|\n|--------------------------------------------------------")
lobp.plot_avprofile_fits(av_profile_bid, av_profile_ask, models,
labels_leg=["data", "LSQ", "LSQF", "ArgMax", "$R_{\infty, 1}$"],
title_str=(
'Average Profile\nTicker: {0}, Date: {1}\n Time: {2} to {3}'.format(
ticker_str, date_str, time_start, time_end)),
filename=lobreader.create_filestr("av-orderbook-fits",
str(num_levels_calc)))
lobreader.set_timecalc(str(time_start_calc), str(time_end_calc))
filename = lobreader.create_filestr("gamma", str(num_levels_calc))
gammas = np.array(
[time_ends, gamma_bids_LSQ, gamma_bids_LSQF, gamma_bids_ArgMax, gamma_bids_RMax1,
gamma_asks_LSQ, gamma_asks_LSQF, gamma_asks_ArgMax, gamma_asks_RMax1])
print(str(gammas.transpose()))
np.savetxt(
".".join((filename, "csv")),
(gammas.transpose()),
fmt='%.10f',
delimiter=',',
header="Time, gamma_bid LSQ,LSQ,gamma_bid LSQF,gamma_bid ArgMax,gamma_bid RMax1,gamma_ask LSQ,gamma_ask LSQF,gamma_bid ArgMax,gamma_ask RMax1",
comments=""
)
print("Estimators for gamma written......")
print("Creating plot.")
title_str = "Estimated $\gamma_b$ and $\gamma_a$\nTicker: {0}, Date: {1}\nStart Time: {2}s, End Time: {3}s".format(
ticker_str, date_str, str(int(int(time_start) / 1000)), str(int(int(time_end) / 1000)))
lobp.plot_avprofile_gamma(filename, time_ends, gammas[1:5, :], gammas[5:, :],
labels_leg=["LSQ", "LSQF", "ArgMax", "$R_{\infty, 1}$"],
title_str=title_str)
# lobp.plot_avprofile_fittings
print("Plots saved.")
print("Finished.")
def calibrate_profile_lobster_new(
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
time_cal,
cal_method_profile="LSQF",
):
""" Splits the day into time intervals of specified size and fits the model profile to the
average profile of the order book in the respective periods, reading lobster data. In
addition, the average profile for the whole day will be fi -
----------
args:
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
cal_method_profile="LSQF",
time_cal time for averageing in ms
"""
gamma_bids_LSQ = []
gamma_bids_LSQF = []
gamma_bids_ArgMax = []
gamma_bids_RMax1 = []
gamma_asks_LSQ = []
gamma_asks_LSQF = []
gamma_asks_ArgMax = [],
gamma_asks_RMax1 = []
lobreader = lobr.LOBSTERReader(
ticker_str,
date_str,
str(time_start_data),
str(time_end_data),
str(num_levels_data),
str(time_start_calc),
str(time_end_calc)
)
computation_interval = int(time_end_calc) - int(time_start_calc)
num_calibration = computation_interval / int(time_cal)
if num_calibration == 0:
return 0
time_starts = np.arange(int(time_start_calc), int(time_end_calc), int(time_cal))
time_ends = time_starts + int(time_cal)
if time_ends[-1] > time_end_calc:
time_ends[-1] = time_end_calc
for time_start, time_end in zip(time_starts, time_ends):
print("Calibrate")
lobreader.set_timecalc(str(time_start), str(time_end))
filename = lobreader.create_filestr(lobr.AV_ORDERBOOK_FILE_ID, str(num_levels_calc))
av_profile_bid, av_profile_ask = lobr.get_data_from_file(filename)
if (av_profile_bid is None) or (av_profile_ask is None):
av_profile_bid, av_profile_ask = lobreader.average_profile(str(num_levels_calc),
write_outputfile=True)
lobp.plot_av_profile(av_profile_bid, av_profile_ask, filename, ticker_str, date_str,
str(time_start), str(time_end))
tvbid = np.sum(av_profile_bid)
tvask = np.sum(av_profile_ask)
modelLSQ, modelLSQF, modelArgMax, modelRMax1 = cal.fit_profile_to_data(
np.array(av_profile_bid), np.array(av_profile_ask))
modelLSQ.set_modelid(lobreader.create_filestr("Model-LSQ", str(num_levels_calc)))
modelLSQF.set_modelid(lobreader.create_filestr("Model-LSQF", str(num_levels_calc)))
modelArgMax.set_modelid(lobreader.create_filestr("Model-ArgMax", str(num_levels_calc)))
modelRMax1.set_modelid(lobreader.create_filestr("Model-RMax1", str(num_levels_calc)))
models = [modelLSQ, modelLSQF, modelArgMax, modelRMax1]
print("Save model parameters to files")
for model, gamma_bids, gamma_asks in zip(models, (
gamma_bids_LSQ, gamma_bids_LSQF, gamma_bids_ArgMax, gamma_bids_RMax1), (
gamma_asks_LSQ, gamma_asks_LSQF,
gamma_asks_ArgMax,
gamma_asks_RMax1)):
model.savef()
gb, ga = model.get_gamma()
gamma_bids.append(gb)
gamma_asks.append(ga)
print("|--------------------------------------------------------\n|")
print(" Model parameters for modelid %s" % (model.get_modelid()))
print(" gamma_bid: %f, gamma_ask: %f" % (model.get_gamma()))
print(" z0_bid: %f, z0_ask: %f" % (model.get_z0()))
print(" TV_bid: %f, TV_ask: %f" % (tvbid, tvask))
print("|\n|--------------------------------------------------------")
lobp.plot_avprofile_fits(av_profile_bid, av_profile_ask, models,
labels_leg=["data", "LSQ", "LSQF", "ArgMax", "$R_{\infty, 1}$"],
title_str=(
'Average Profile\nTicker: {0}, Date: {1}\n Time: {2} to {3}'.format(
ticker_str, date_str, time_start, time_end)),
filename=lobreader.create_filestr("av-orderbook-fits",
str(num_levels_calc)))
lobreader.set_timecalc(str(time_start_calc), str(time_end_calc))
filename = lobreader.create_filestr("gamma", str(num_levels_calc))
gammas = np.array(
[time_ends, gamma_bids_LSQ, gamma_bids_LSQF, gamma_bids_ArgMax, gamma_bids_RMax1,
gamma_asks_LSQ, gamma_asks_LSQF, gamma_asks_ArgMax, gamma_asks_RMax1])
print(str(gammas.transpose()))
np.savetxt(
".".join((filename, "csv")),
(gammas.transpose()),
fmt='%.10f',
delimiter=',',
header="Time, gamma_bid LSQ,LSQ,gamma_bid LSQF,gamma_bid ArgMax,gamma_bid RMax1,gamma_ask LSQ,gamma_ask LSQF,gamma_bid ArgMax,gamma_ask RMax1",
comments=""
)
print("Estimators for gamma written......")
print("Creating plot.")
title_str = "Estimated $\gamma_b$ and $\gamma_a$\nTicker: {0}, Date: {1}\nStart Time: {2}s, End Time: {3}s".format(
ticker_str, date_str, str(int(int(time_start) / 1000)), str(int(int(time_end) / 1000)))
lobp.plot_avprofile_gamma(filename, time_ends, gammas[1:5, :], gammas[5:, :],
labels_leg=["LSQ", "LSQF", "ArgMax", "$R_{\infty, 1}$"],
title_str=title_str)
# lobp.plot_avprofile_fittings
print("Plots saved.")
print("Finished.")
def extract_volume_lobster(
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
ntimepoints_grid
):
""" Extract the volume process from lobster and save as csv and plots
----------
args:
ticker_str,
date_str,
time_start_data,
time_end_data,
time_start_calc,
time_end_calc,
num_levels_data,
num_levels_calc,
ntimepoints_grid if None, then the process is extracted for all time points in the data, else a uniform grid is created
OUTPUT:
produces files with identifier: volume
"""
# read files from lobster to uniform grid
lobreader = lobr.LOBSTERReader(
ticker_str,
date_str,
str(time_start_data),
str(time_end_data),
str(num_levels_data),
str(time_start_calc),
str(time_end_calc)
)
print('Extracting total volume process.')
dt, time_stamps, volume_bid, volume_ask = lobreader.load_ordervolume(
num_observations=ntimepoints_grid,
write_output=True
)
print('Plotting data')
title_str = "Order volume in first {0} buckets\n ticker: {1}, Date: {2}".format(num_levels_data,
ticker_str,
date_str)
filename = "_".join((ticker_str, date_str, str(time_start_calc), str(time_end_calc),
"ordervolume", str(num_levels_data)))
lobp.plot_bidaskdata(time_stamps, volume_bid, volume_ask, title_str=title_str,
filename=filename)
print("Finished.")
def extract_price_lobster(
        ticker_str,
        date_str,
        time_start_data,
        time_end_data,
        time_start_calc,
        time_end_calc,
        num_levels_data,
        ntimepoints_grid
):
    """ Extract the best bid/ask price processes from lobster and save as csv and plots
    ----------
    args:
        ticker_str: ticker symbol
        date_str: date of the trading day
        time_start_data, time_end_data: time window covered by the data files
        time_start_calc, time_end_calc: time window used for the calculation
        num_levels_data: number of levels available in the data
        ntimepoints_grid: if None, then the process is extracted for all time points in the data, else a uniform grid is created
    OUTPUT:
        produces files with identifier: best_prices
    """
    # The reader encapsulates access to the LOBSTER csv files restricted to
    # the calculation window.
    lobreader = lobr.LOBSTERReader(
        ticker_str,
        date_str,
        str(time_start_data),
        str(time_end_data),
        str(num_levels_data),
        str(time_start_calc),
        str(time_end_calc)
    )
    # BUGFIX: the original messages/title were copy-pasted from
    # extract_volume_lobster and wrongly claimed to show order volume;
    # this function extracts and plots the best bid/ask prices.
    print('Extracting best bid and ask price processes.')
    dt, time_stamps, prices_bid, prices_ask = lobreader.load_prices(ntimepoints_grid,
                                                                    write_output=True)
    print('Plotting data')
    title_str = "Best bid and ask prices\n ticker: {0}, Date: {1}".format(ticker_str,
                                                                          date_str)
    filename = "_".join((ticker_str, date_str, str(time_start_calc), str(time_end_calc),
                         "best_prices", str(num_levels_data)))
    lobp.plot_bidaskdata(time_stamps, prices_bid, prices_ask, title_str=title_str,
                         filename=filename)
    print("Finished.")
def _prediction_rvar_running_frame(
        time_start,
        time_discr,
        data_price,
        data_bid,
        data_ask,
        num_timepoints_calib,
        num_timepoints_recal=1,
        latex=False
):
    """
    This function creates a price model induced by the mean reverting order book model and calibrates this model on a defined running time frame.
    ----------------
    args:
        time_start: float time point at which data starts (calibration will start num_timepoints_calib later)
        time_discr: float time between 2 time points
        data_price: mid-price data (uniform time grid, starting at time_start)
        data_bid: data bid side (uniform time grid, starting at time_start)
        data_ask: data ask side (uniform time grid, starting at time_start)
        num_timepoints_calib: number of time points to be used for each calibration
        num_timepoints_recal=1: number of time points after which recalibration starts
        latex=False: currently unused in this function -- TODO confirm intent
    returns:
        list of lists: both calibration histories, followed by the predicted
        volatilities ('rcg' and 'rvar'), the realized volatility benchmark,
        and the relative errors of the two predictions
    """
    # Convert to correct data type
    time_start = float(time_start)
    time_discr = float(time_discr)
    num_timepoints_calib = int(num_timepoints_calib)
    num_timepoints_recal = int(num_timepoints_recal)
    # Accumulators; one entry per calibration window.
    price_volpred_rvar = []
    price_volpred_rcg = []
    rel_err_rcg = []
    rel_err_rvar = []
    price_vol = []
    # Model/calibrator pair 'rcg' (default estimators).
    # NOTE(review): the id 'rvar' below suggests realized variance; the
    # original comments labelling these two pairs looked swapped -- confirm.
    model1 = lobprice.PriceModel(modelid="price-model-rcg", impact_coefficient=1 / float(100.))
    cal1 = cal.OrderVolumeCalibrator(
        calibratorid="cal-price-model-rcg",
        model=model1
    )
    # Model/calibrator pair 'rvar'; uses the gBM volatility estimator for
    # the dynamics correlation.
    model2 = lobprice.PriceModel(modelid="price-model-rvar", impact_coefficient=1 / float(100.))
    cal2 = cal.OrderVolumeCalibrator(
        calibratorid="cal-price-model-rvar",
        model=model2,
        estimator_dynamics=None,
        estimator_dyn_corr=est.estimate_vol_gBM
    )
    print("Start calibration in time frame")
    # Slide a window of num_timepoints_calib points over the data, advancing
    # num_timepoints_recal points per iteration.
    for ctr_now in range(num_timepoints_calib, len(data_bid), num_timepoints_recal):
        # Calibrate
        # time_now = time_start + (ctr_start + num_timepoints_calib) * time_discr
        # calibrate on frame and set
        # NOTE(review): the returned parameters are not used below;
        # calibrate() presumably updates the attached model in place and
        # records the fit in the calibrator history -- confirm.
        par = cal2.calibrate(
            time_start + ctr_now * time_discr,
            time_discr,
            data_bid[ctr_now - num_timepoints_calib:ctr_now:],
            data_ask[ctr_now - num_timepoints_calib:ctr_now:]
        )
        par2_bid, par2_ask, rho = cal1.calibrate(
            time_start + ctr_now * time_discr,
            time_discr,
            data_bid[ctr_now - num_timepoints_calib:ctr_now:],
            data_ask[ctr_now - num_timepoints_calib:ctr_now:]
        )
        # add correlation to model 1
        # calc price vol via 3 different way
        model1.set_rho(model2.get_rho())
        # Benchmark: realized-variance estimate from the observed mid prices.
        price_vol1 = est.estimate_vol_rv(
            data_price[ctr_now - num_timepoints_calib:ctr_now:],
            (num_timepoints_calib - 1) * time_discr
        )
        price_vol2 = model1.get_vol()
        price_vol3 = model2.get_vol()
        price_vol.append(price_vol1)
        price_volpred_rcg.append(price_vol2)
        price_volpred_rvar.append(price_vol3)
        # Relative errors against the realized-variance benchmark.
        rel_err_rcg.append(math.fabs(price_vol2 - price_vol1) / float(price_vol1))
        rel_err_rvar.append(math.fabs(price_vol3 - price_vol1) / float(price_vol1))
        # Single-line progress indicator (carriage return, no newline).
        progress = (ctr_now - num_timepoints_calib) / float(len(data_bid) - num_timepoints_calib)
        sys.stdout.write("\r{0:.1f}%".format(progress * 100))
        sys.stdout.flush()
    # Save calibration history
    sys.stdout.write("\r{0:.1f}%".format(100))
    sys.stdout.flush()
    print("\n")
    results = cal1.history.to_list() + cal2.history.to_list()
    results.append(['price_volpred_rcg'] + price_volpred_rcg)
    results.append(['price_volpred_rvar'] + price_volpred_rvar)
    results.append(['price_vol'] + price_vol)
    results.append(['rel_error_rcg'] + rel_err_rcg)
    results.append(['rel_error_rvar'] + rel_err_rvar)
    return results
def vol_estimation(
        ticker_str,
        date_str,
        time_start_data,
        time_end_data,
        time_start_calc,
        time_end_calc,
        num_levels_data,
        num_levels_calc,
        ntimepoints_grid,
        ntimesteps_cal,
        ntimesteps_nextcal,
        ntimesteps_snapshot=None
):
    """ Predicts the volatility of the price by volatility of the log market depths
    -----------
    args:
        ticker_str: ticker symbol
        date_str: date of the trading day
        time_start_data, time_end_data: time window (ms) covered by the data files
        time_start_calc, time_end_calc: time window (ms) used for the calculation
        num_levels_data: number of levels available in the data
        num_levels_calc: number of levels used to compute the market depth
        ntimepoints_grid: number of points of the uniform time grid (None = all event times)
        ntimesteps_cal: number of time points used for each calibration
        ntimesteps_nextcal: number of time points after which recalibration starts
        ntimesteps_snapshot=None: currently unused (LaTeX snapshot export not implemented)
    OUTPUT:
        csv file with identifier cal-vol-pred containing the calibration
        histories and the volatility predictions/errors, one column each
    """
    # Step 1: Load data
    print("Load data.....")
    # read files from lobster to uniform grid
    lobreader = lobr.LOBSTERReader(
        ticker_str,
        date_str,
        str(time_start_data),
        str(time_end_data),
        str(num_levels_data),
        str(time_start_calc),
        str(time_end_calc)
    )
    print('Extracting market depth and price processes on uniform grid.')
    dt, time_stamps, depth_bid, depth_ask = lobreader.load_marketdepth(
        num_observations=ntimepoints_grid,
        num_levels_calc_str=str(num_levels_calc),
        write_output=False
    )
    __, __, prices_bid, prices_ask = lobreader.load_prices(ntimepoints_grid, write_output=False)
    prices_mid = (prices_bid + prices_ask) / float(2)
    # Step 2: Calculation, returns list of lists
    print("Data loaded. Start calculations......")
    results = _prediction_rvar_running_frame(
        float(time_start_calc) / float(1000),  # start time converted ms -> s
        dt,
        prices_mid,
        depth_bid,
        depth_ask,
        ntimesteps_cal,
        ntimesteps_nextcal,
        latex=False
    )
    # Step 3: Output
    print("Finished. Saving output.")
    # Transpose so that each history entry becomes one csv column.
    df = pd.DataFrame(results, columns=None, index=None).transpose()
    filename = lobr.create_lobster_filename(ticker_str, date_str, str(time_start_calc),
                                            str(time_end_calc), "cal-vol-pred",
                                            str(num_levels_calc))
    # BUGFIX: removed a stray "_".join((...)) expression whose result was
    # discarded -- dead code left over from a copy-paste.
    df.to_csv(".".join((filename, "csv")), index=False)
    # TODO: implement the snapshot export driven by ntimesteps_snapshot
    # (previously sketched here as commented-out tolatex() code).
    print("Data saved in files with name: {}.".format(filename))
| 37.30821
| 252
| 0.610209
| 3,359
| 27,720
| 4.702888
| 0.104198
| 0.040451
| 0.033741
| 0.03545
| 0.806925
| 0.773881
| 0.754827
| 0.74799
| 0.737102
| 0.730835
| 0
| 0.008071
| 0.289286
| 27,720
| 742
| 253
| 37.358491
| 0.793767
| 0.234632
| 0
| 0.681818
| 0
| 0.012987
| 0.112622
| 0.01161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0
| 0.019481
| 0
| 0.041126
| 0.097403
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d22c3b3fa0906d971a038fd9c161a7f3ecf98ef
| 13,306
|
py
|
Python
|
newrelic_cli/tests/test_synthetics.py
|
NativeInstruments/newrelic-cli
|
abe19b18ec76436912e0662c8fca528f4be2c43d
|
[
"MIT"
] | 26
|
2017-03-14T12:54:23.000Z
|
2020-10-18T21:13:47.000Z
|
newrelic_cli/tests/test_synthetics.py
|
NativeInstruments/newrelic-cli
|
abe19b18ec76436912e0662c8fca528f4be2c43d
|
[
"MIT"
] | 3
|
2017-03-17T11:19:10.000Z
|
2018-01-10T15:02:06.000Z
|
newrelic_cli/tests/test_synthetics.py
|
NativeInstruments/newrelic-cli
|
abe19b18ec76436912e0662c8fca528f4be2c43d
|
[
"MIT"
] | 3
|
2017-12-22T01:12:05.000Z
|
2020-06-13T13:40:54.000Z
|
import base64
from unittest import TestCase
import newrelic_cli.exceptions
from newrelic_cli.synthetics import SyntheticsClient
import requests_mock
@requests_mock.mock()
class NewRelicSyntheticsClientTests(TestCase):
    """Unit tests for SyntheticsClient against a mocked Synthetics API.

    The @requests_mock.mock() class decorator stubs all HTTP traffic and
    passes the mock adapter as an extra ``mock`` argument to every test
    method; no real requests are made.

    NOTE: assertRaisesRegexp (the Python 2 spelling) is kept deliberately
    for Python 2 compatibility of this code base.
    """

    def setUp(self):
        super(NewRelicSyntheticsClientTests, self).setUp()
        self.client = SyntheticsClient(api_key='dummy_key')
        # Pre-cooked values used in most of the tests.
        self.monitor_name = 'I am a monitor without an alert condition'
        self.monitor_id = 'aff9f4f2-57b9-49de-9f88-83efa059bca4'
        self.first_monitor = {
            'id': self.monitor_id,
            'name': self.monitor_name,
            'type': 'SCRIPT_API',
            'frequency': 10,
            'locations': [
                'AWS_US_WEST_1',
            ],
            'status': 'ENABLED',
            'slaThreshold': 7,
            'options': {},
            "modifiedAt": "2017-01-05T15:51:54.603+0000",
            "createdAt": "2017-01-02T16:33:50.160+0000",
            "userId": 0,
            "apiVersion": "0.4.1"
        }
        self.second_monitor = {
            'id': '4f1ee15f-ed13-47ba-9ab8-6ce1ac27a8ba',
            'name': 'You never need me alone',
            'type': 'SCRIPT_API',
            'frequency': 10,
            'locations': [
                'AWS_US_WEST_1',
            ],
            'status': 'ENABLED',
            'slaThreshold': 7,
            'options': {},
            "modifiedAt": "2017-01-05T15:51:54.603+0000",
            "createdAt": "2017-01-02T16:33:50.160+0000",
            "userId": 0,
            "apiVersion": "0.4.1"
        }
        self.all_monitors_response = {
            'monitors': [
                self.first_monitor,
                self.second_monitor
            ]
        }
        self.monitor_location = '{}/v3/monitors/{}'.format(
            self.client.base_url,
            self.monitor_id
        )
        self.monitor_script_plain = (
            'console.log("Hello, Username!");'
        )
        # BUGFIX: base64.b64encode() requires bytes on Python 3 (passing a
        # str raised TypeError in setUp).  Encode first and decode the
        # result so the fixture stays a native str on both Python 2 and 3.
        self.monitor_script_base64 = base64.b64encode(
            self.monitor_script_plain.encode('utf-8')
        ).decode('ascii')

    # ---- helpers for endpoints/stubs repeated across the tests ----

    def _monitors_url(self):
        """Collection endpoint for all monitors."""
        return '{}/v3/monitors'.format(self.client.base_url)

    def _monitor_url(self):
        """Endpoint of the pre-cooked monitor (fixes old 'endpooint' typo)."""
        return '{}/v3/monitors/{}'.format(
            self.client.base_url,
            self.monitor_id
        )

    def _script_url(self):
        """Script endpoint of the pre-cooked monitor."""
        return '{}/v3/monitors/{}/script'.format(
            self.client.base_url,
            self.monitor_id
        )

    def _stub_monitor_list(self, mock, response):
        """Stub GET on the collection endpoint with the given JSON payload."""
        mock.get(
            url=self._monitors_url(),
            status_code=200,
            json=response
        )

    def test_get_monitor_by_id_success(self, mock):
        self._stub_monitor_list(mock, self.all_monitors_response)
        res = self.client.get_monitor_by_id(self.first_monitor['id'])
        self.assertDictEqual(self.first_monitor, res)

    def test_get_nonexistent_monitor_by_id(self, mock):
        self._stub_monitor_list(mock, {'monitors': []})
        res = self.client.get_monitor_by_id(self.first_monitor['id'])
        self.assertIsNone(res)

    def test_create_monitor_success(self, mock):
        mock.post(
            url=self._monitors_url(),
            status_code=201,
            headers={'Location': self.monitor_location}
        )
        mock.get(
            url=self.monitor_location,
            status_code=200,
            json=self.first_monitor
        )
        r = self.client.create_monitor(self.monitor_name)
        self.assertDictEqual(r, self.first_monitor)

    def test_create_monitor_with_sla_success(self, mock):
        # NOTE: monitor_data deliberately aliases first_monitor, so the
        # final comparison also sees slaThreshold == 42.
        monitor_data = self.first_monitor
        monitor_data['slaThreshold'] = 42
        mock.post(
            url=self._monitors_url(),
            status_code=201,
            headers={'Location': self.monitor_location}
        )
        mock.get(
            url=self.monitor_location,
            status_code=200,
            json=monitor_data
        )
        r = self.client.create_monitor(self.monitor_name, slaThreshold=42)
        self.assertDictEqual(r, self.first_monitor)

    def test_update_nonexistent_monitor(self, mock):
        # we don't have any monitors
        self._stub_monitor_list(mock, {'monitors': []})
        mock.put(
            url=self._monitor_url(),
            status_code=204
        )
        with self.assertRaisesRegexp(
            newrelic_cli.exceptions.ItemNotFoundError,
            '{}'.format(self.monitor_name)
        ):
            self.client.update_monitor(
                current_name=self.monitor_name,
                new_name='I am changed name'
            )

    def test_update_monitor_name(self, mock):
        mock.put(
            url=self._monitor_url(),
            status_code=204
        )
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.get(
            url=self.monitor_location,
            status_code=200,
            json=self.first_monitor
        )
        # We don't expect any response here.
        # Just make sure no exceptions raised
        self.client.update_monitor(
            current_name=self.monitor_name,
            new_name='I am changed name'
        )

    def test_update_monitor_everything_but_name(self, mock):
        mock.put(
            url=self._monitor_url(),
            status_code=204
        )
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.get(
            url=self.monitor_location,
            status_code=200,
            json=self.first_monitor
        )
        # We don't expect any response here.
        # Just make sure no exceptions raised
        self.client.update_monitor(
            self.monitor_name,
            frequency=42,
            locations=['AWS_US_EAST_1'],
            status='DISABLED',
            monitor_type='SIMPLE',
            slaThreshold=8
        )

    def test_update_monitor_no_changes(self, mock):
        mock.put(
            url=self._monitor_url(),
            status_code=204
        )
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.get(
            url=self.monitor_location,
            status_code=200,
            json=self.first_monitor
        )
        with self.assertRaisesRegexp(
            newrelic_cli.exceptions.NewRelicException,
            'No changes requested.'
        ):
            self.client.update_monitor(
                current_name=self.monitor_name
            )

    def test_delete_monitor_success(self, mock):
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.delete(
            url=self._monitor_url(),
            status_code=204
        )
        # We don't expect any response here.
        # Just make sure no exceptions raised
        self.client.delete_monitor(self.monitor_name)

    def test_delete_nonexistent_monitor(self, mock):
        # we don't have any monitors
        self._stub_monitor_list(mock, {'monitors': []})
        mock.delete(
            url=self._monitor_url(),
            status_code=204
        )
        with self.assertRaisesRegexp(
            newrelic_cli.exceptions.ItemNotFoundError,
            '{}'.format(self.monitor_name)
        ):
            self.client.delete_monitor(self.monitor_name)

    def test_get_monitor_script_success(self, mock):
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.get(
            url=self._script_url(),
            status_code=200,
            json={'scriptText': self.monitor_script_base64}
        )
        script = self.client.get_monitor_script(self.monitor_name)
        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(self.monitor_script_plain, script)

    def test_get_nonexistent_monitor_script(self, mock):
        # we don't have any monitors
        self._stub_monitor_list(mock, {'monitors': []})
        mock.get(
            url=self._script_url(),
            status_code=200,
            json={'scriptText': self.monitor_script_base64}
        )
        with self.assertRaisesRegexp(
            newrelic_cli.exceptions.ItemNotFoundError,
            '{}'.format(self.monitor_name)
        ):
            self.client.get_monitor_script(self.monitor_name)

    def test_get_monitor_without_script(self, mock):
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.get(
            url=self._script_url(),
            status_code=404,
        )
        with self.assertRaisesRegexp(
            newrelic_cli.exceptions.ItemNotFoundError,
            '{}'.format(self.monitor_name)
        ):
            self.client.get_monitor_script(self.monitor_name)

    def test_upload_monitor_script_success(self, mock):
        self._stub_monitor_list(mock, self.all_monitors_response)
        mock.put(
            url=self._script_url(),
            status_code=204
        )
        # We don't expect any response here.
        # Just make sure no exceptions raised
        self.client.upload_monitor_script(
            self.monitor_name,
            self.monitor_script_plain
        )

    def test_upload_nonexistent_monitor_script(self, mock):
        # we don't have any monitors
        self._stub_monitor_list(mock, {'monitors': []})
        mock.put(
            url=self._script_url(),
            status_code=204
        )
        # We don't expect any response here.
        # Just make sure no exceptions raised
        with self.assertRaisesRegexp(
            newrelic_cli.exceptions.ItemNotFoundError,
            '{}'.format(self.monitor_name)
        ):
            self.client.upload_monitor_script(
                self.monitor_name,
                self.monitor_script_plain
            )
| 31.985577
| 79
| 0.56358
| 1,385
| 13,306
| 5.13935
| 0.118412
| 0.077269
| 0.060691
| 0.075864
| 0.835909
| 0.82748
| 0.812588
| 0.810761
| 0.773672
| 0.749368
| 0
| 0.030994
| 0.340448
| 13,306
| 415
| 80
| 32.062651
| 0.780082
| 0.041786
| 0
| 0.657754
| 0
| 0
| 0.092358
| 0.023875
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.042781
| false
| 0
| 0.013369
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1276bfefb6ecdda5568b4229c1b2ad4c01ec300f
| 81
|
py
|
Python
|
synthqc/__init__.py
|
jcreinhold/synthqc
|
a08d4c925a81c397195fb87f6691893858fc0792
|
[
"Apache-2.0"
] | null | null | null |
synthqc/__init__.py
|
jcreinhold/synthqc
|
a08d4c925a81c397195fb87f6691893858fc0792
|
[
"Apache-2.0"
] | null | null | null |
synthqc/__init__.py
|
jcreinhold/synthqc
|
a08d4c925a81c397195fb87f6691893858fc0792
|
[
"Apache-2.0"
] | null | null | null |
from .errors import *
from .plot import *
from .util import *
from . import exec
| 16.2
| 21
| 0.716049
| 12
| 81
| 4.833333
| 0.5
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197531
| 81
| 4
| 22
| 20.25
| 0.892308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1294f07399e4ab473e34cea80690fd5828ac34c7
| 1,215
|
py
|
Python
|
activity_recognition/spatial_transforms.py
|
jmhuer/computer-vision-framework
|
f58ad9db765abb7b9ab78c7ead18c7c812e29061
|
[
"MIT"
] | 1
|
2020-12-12T17:56:25.000Z
|
2020-12-12T17:56:25.000Z
|
activity_recognition/spatial_transforms.py
|
jmhuer/computer-vision-framework
|
f58ad9db765abb7b9ab78c7ead18c7c812e29061
|
[
"MIT"
] | 1
|
2021-10-21T11:23:06.000Z
|
2021-10-21T11:23:06.000Z
|
activity_recognition/spatial_transforms.py
|
guilhermesurek/computer-vision-framework
|
508c8efe0bf4d983d533f0547210b2732d5e9620
|
[
"MIT"
] | null | null | null |
from torchvision.transforms import transforms
# spatial transformations
class Compose(transforms.Compose):
    # Extends torchvision's Compose with the randomize_parameters() hook so
    # a whole pipeline can re-draw its random parameters in one call.
    def randomize_parameters(self):
        """Propagate parameter re-randomization to every wrapped transform."""
        for t in self.transforms:
            t.randomize_parameters()
class ToTensor(transforms.ToTensor):
    # Deterministic transform; hook exists only to satisfy the
    # randomize_parameters() interface used by Compose above.
    def randomize_parameters(self):
        """No random parameters; intentionally a no-op."""
        pass
class Normalize(transforms.Normalize):
    # Deterministic transform; hook exists only to satisfy the
    # randomize_parameters() interface used by Compose above.
    def randomize_parameters(self):
        """No random parameters; intentionally a no-op."""
        pass
class ScaleValue(object):
    """Multiply input tensors in place by a fixed scalar ``s``."""
    def __init__(self, s):
        # Fixed multiplicative factor applied on every call.
        self.s = s
    def __call__(self, tensor):
        # In-place scaling: the caller's tensor object is modified as well
        # as returned (``*=`` mutates mutable tensor types).
        tensor *= self.s
        return tensor
    def randomize_parameters(self):
        """No random parameters; intentionally a no-op."""
        pass
class Resize(transforms.Resize):
    # Deterministic transform; hook exists only to satisfy the
    # randomize_parameters() interface used by Compose above.
    def randomize_parameters(self):
        """No random parameters; intentionally a no-op."""
        pass
class Scale(transforms.Scale):
    # NOTE(review): transforms.Scale was deprecated and later removed from
    # torchvision in favour of Resize -- confirm the pinned version still
    # provides it.
    def randomize_parameters(self):
        """No random parameters; intentionally a no-op."""
        pass
class CenterCrop(transforms.CenterCrop):
    # Deterministic transform; hook exists only to satisfy the
    # randomize_parameters() interface used by Compose above.
    def randomize_parameters(self):
        """No random parameters; intentionally a no-op."""
        pass
def get_normalize_method(mean, std, no_mean_norm, no_std_norm):
    """Build a Normalize transform, optionally disabling either statistic.

    A disabled mean normalization subtracts zero; a disabled std
    normalization divides by one -- each flag independently replaces the
    corresponding statistic with its neutral element.
    """
    eff_mean = [0, 0, 0] if no_mean_norm else mean
    eff_std = [1, 1, 1] if no_std_norm else std
    return Normalize(eff_mean, eff_std)
| 19.285714
| 63
| 0.63786
| 143
| 1,215
| 5.223776
| 0.265734
| 0.203481
| 0.206158
| 0.243641
| 0.410977
| 0.362784
| 0
| 0
| 0
| 0
| 0
| 0.01359
| 0.273251
| 1,215
| 63
| 64
| 19.285714
| 0.83239
| 0.01893
| 0
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25641
| false
| 0.153846
| 0.025641
| 0
| 0.589744
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
129b364be29a0e338d4ed5b25d4c30b79558fbfc
| 46
|
py
|
Python
|
deepclustering/loss/__init__.py
|
jizongFox/deep-clustering-toolbox
|
0721cbbb278af027409ed4c115ccc743b6daed1b
|
[
"MIT"
] | 34
|
2019-08-05T03:48:36.000Z
|
2022-03-29T03:04:51.000Z
|
deepclustering/loss/__init__.py
|
jizongFox/deep-clustering-toolbox
|
0721cbbb278af027409ed4c115ccc743b6daed1b
|
[
"MIT"
] | 10
|
2019-05-03T21:02:50.000Z
|
2021-12-23T08:01:30.000Z
|
deepclustering/loss/__init__.py
|
ETS-Research-Repositories/deep-clustering-toolbox
|
0721cbbb278af027409ed4c115ccc743b6daed1b
|
[
"MIT"
] | 5
|
2019-09-29T07:56:03.000Z
|
2021-04-22T12:08:50.000Z
|
# from .loss import *
from .kl_losses import *
| 23
| 24
| 0.717391
| 7
| 46
| 4.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 24
| 23
| 0.842105
| 0.413043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12c603893176173477a100454470acdbacfd6ddc
| 226
|
py
|
Python
|
lib/oeqa/runtime/cases/rubygems_rubygems_puppet_resource_api.py
|
tuxable-ltd/meta-rubygems
|
e80630e79b64e1be8339e1add0ab07644ec99425
|
[
"BSD-2-Clause"
] | null | null | null |
lib/oeqa/runtime/cases/rubygems_rubygems_puppet_resource_api.py
|
tuxable-ltd/meta-rubygems
|
e80630e79b64e1be8339e1add0ab07644ec99425
|
[
"BSD-2-Clause"
] | 141
|
2021-02-04T16:22:13.000Z
|
2022-03-27T08:29:40.000Z
|
lib/oeqa/runtime/cases/rubygems_rubygems_puppet_resource_api.py
|
tuxable-ltd/meta-rubygems
|
e80630e79b64e1be8339e1add0ab07644ec99425
|
[
"BSD-2-Clause"
] | 3
|
2021-02-04T14:02:01.000Z
|
2022-02-02T16:46:52.000Z
|
from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_puppet_resource_api(RubyGemsTestUtils):
    """Runtime test: check the puppet-resource_api gem is installed."""
    def test_gem_list_rubygems_puppet_resource_api(self):
        # Delegates to the shared helper from RubyGemsTestUtils, which
        # presumably inspects the installed gem list on the target -- see
        # rubygems_utils for the exact check.
        self.gem_is_installed("puppet-resource_api")
| 28.25
| 66
| 0.840708
| 27
| 226
| 6.555556
| 0.62963
| 0.237288
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106195
| 226
| 7
| 67
| 32.285714
| 0.876238
| 0
| 0
| 0
| 0
| 0
| 0.084444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
12c788d826bf22a93264f50a5216b5d45387274a
| 34
|
py
|
Python
|
cm/app/api_v1/my_calculation_module_directory/__init__.py
|
HotMaps/CM_DH_Test_CREM
|
5c0a2f218aba36ff53c94e1cce6d979d79cd30af
|
[
"Apache-2.0"
] | null | null | null |
cm/app/api_v1/my_calculation_module_directory/__init__.py
|
HotMaps/CM_DH_Test_CREM
|
5c0a2f218aba36ff53c94e1cce6d979d79cd30af
|
[
"Apache-2.0"
] | null | null | null |
cm/app/api_v1/my_calculation_module_directory/__init__.py
|
HotMaps/CM_DH_Test_CREM
|
5c0a2f218aba36ff53c94e1cce6d979d79cd30af
|
[
"Apache-2.0"
] | null | null | null |
from AD import *
from CM import *
| 11.333333
| 16
| 0.705882
| 6
| 34
| 4
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 34
| 3
| 17
| 11.333333
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12e1f37a845120171a5a2b2220450a9a64ae60bb
| 72
|
py
|
Python
|
sqlenergy/core/__init__.py
|
kjph/sqlenergy
|
f58c88f74f0f93be128acabb9750d6e6a8178fdb
|
[
"MIT"
] | null | null | null |
sqlenergy/core/__init__.py
|
kjph/sqlenergy
|
f58c88f74f0f93be128acabb9750d6e6a8178fdb
|
[
"MIT"
] | null | null | null |
sqlenergy/core/__init__.py
|
kjph/sqlenergy
|
f58c88f74f0f93be128acabb9750d6e6a8178fdb
|
[
"MIT"
] | null | null | null |
from . import fetchInputs
from . import hquery
from . import TimeSeries
| 18
| 25
| 0.791667
| 9
| 72
| 6.333333
| 0.555556
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 72
| 3
| 26
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12ea1901b5a40b736434978b145a6d9110a16470
| 37,911
|
py
|
Python
|
yang/binding_topology.py
|
rvilalta/OFC2019_SC472
|
c0bcbd05bb6c90eb9d8ab5abdc10b04d65a8a5d3
|
[
"Apache-2.0"
] | 2
|
2018-11-28T15:03:08.000Z
|
2019-02-04T15:19:58.000Z
|
yang/binding_topology.py
|
rvilalta/OFC2019_SC472
|
c0bcbd05bb6c90eb9d8ab5abdc10b04d65a8a5d3
|
[
"Apache-2.0"
] | null | null | null |
yang/binding_topology.py
|
rvilalta/OFC2019_SC472
|
c0bcbd05bb6c90eb9d8ab5abdc10b04d65a8a5d3
|
[
"Apache-2.0"
] | 2
|
2021-09-28T15:31:03.000Z
|
2021-11-16T17:53:59.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Python 3 renamed __builtin__ to builtins and dropped the `long` type, so
# alias both here to keep the generated code running on either major version.
if six.PY3:
  import builtins as __builtin__
  long = int
elif six.PY2:
  import __builtin__
class yc_port_topology__topology_node_port(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module topology - based on the path /topology/node/port. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__port_id','__layer_protocol_name',)
_yang_name = 'port'
_yang_namespace = 'urn:topology'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__port_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
self.__layer_protocol_name = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ETH': {}, 'OPTICAL': {}},), is_leaf=True, yang_name="layer-protocol-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='layer-protocol-name', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['topology', 'node', 'port']
def _get_port_id(self):
"""
Getter method for port_id, mapped from YANG variable /topology/node/port/port_id (string)
"""
return self.__port_id
def _set_port_id(self, v, load=False):
"""
Setter method for port_id, mapped from YANG variable /topology/node/port/port_id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_id() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)""",
})
self.__port_id = t
if hasattr(self, '_set'):
self._set()
def _unset_port_id(self):
self.__port_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
def _get_layer_protocol_name(self):
"""
Getter method for layer_protocol_name, mapped from YANG variable /topology/node/port/layer_protocol_name (layer-protocol-name)
"""
return self.__layer_protocol_name
def _set_layer_protocol_name(self, v, load=False):
"""
Setter method for layer_protocol_name, mapped from YANG variable /topology/node/port/layer_protocol_name (layer-protocol-name)
If this variable is read-only (config: false) in the
source YANG file, then _set_layer_protocol_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_layer_protocol_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ETH': {}, 'OPTICAL': {}},), is_leaf=True, yang_name="layer-protocol-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='layer-protocol-name', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """layer_protocol_name must be of a type compatible with layer-protocol-name""",
'defined-type': "topology:layer-protocol-name",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ETH': {}, 'OPTICAL': {}},), is_leaf=True, yang_name="layer-protocol-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='layer-protocol-name', is_config=True)""",
})
self.__layer_protocol_name = t
if hasattr(self, '_set'):
self._set()
def _unset_layer_protocol_name(self):
  # Reset the leaf to a fresh, unset YANGDynClass wrapper (same arguments
  # as the default created in __init__).
  self.__layer_protocol_name = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ETH': {}, 'OPTICAL': {}},), is_leaf=True, yang_name="layer-protocol-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='layer-protocol-name', is_config=True)
# Public accessors: plain properties wired to the generated get/set pairs.
port_id = __builtin__.property(_get_port_id, _set_port_id)
layer_protocol_name = __builtin__.property(_get_layer_protocol_name, _set_layer_protocol_name)
# Ordered registry of this container's elements, used by the
# copy-constructor and (de)serialisation machinery.
_pyangbind_elements = OrderedDict([('port_id', port_id), ('layer_protocol_name', layer_protocol_name), ])
class yc_node_topology__topology_node(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module topology - based on the path /topology/node. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # List entry for /topology/node: key leaf 'node-id' plus a child 'port'
  # list keyed by 'port-id'.  Generated code — edits will be lost on
  # regeneration.
  __slots__ = ('_path_helper', '_extmethods', '__node_id','__port',)
  _yang_name = 'node'
  _yang_namespace = 'urn:topology'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Create the container with default (unset) leaf/list wrappers, then
    optionally copy values from a single positional source object."""
    self._path_helper = False
    self._extmethods = False
    self.__node_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="node-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
    self.__port = YANGDynClass(base=YANGListType("port_id",yc_port_topology__topology_node_port, yang_name="port", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-id', extensions=None), is_container='list', yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      # Copy-constructor path: the source must expose every element in
      # _pyangbind_elements; only changed values are copied, via setters.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        # 'load' is forwarded so key leaves may be populated during loads.
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path of this node as a list of element names; delegates to
    # the parent when this instance is attached to a tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['topology', 'node']
  def _get_node_id(self):
    """
    Getter method for node_id, mapped from YANG variable /topology/node/node_id (string)
    """
    return self.__node_id
  def _set_node_id(self, v, load=False):
    """
    Setter method for node_id, mapped from YANG variable /topology/node/node_id (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_node_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_node_id() directly.
    """
    # node-id is the list key: once this entry lives inside an
    # instantiated list (has a parent) it may only be set with load=True.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                           " within an instantiated list")
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="node-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """node_id must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="node-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)""",
      })
    self.__node_id = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_node_id(self):
    # Restore the default (unset) wrapper for the node-id key leaf.
    self.__node_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="node-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
  def _get_port(self):
    """
    Getter method for port, mapped from YANG variable /topology/node/port (list)
    """
    return self.__port
  def _set_port(self, v, load=False):
    """
    Setter method for port, mapped from YANG variable /topology/node/port (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce into a keyed YANG list of port entries ('port-id' key).
      t = YANGDynClass(v,base=YANGListType("port_id",yc_port_topology__topology_node_port, yang_name="port", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-id', extensions=None), is_container='list', yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """port must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("port_id",yc_port_topology__topology_node_port, yang_name="port", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-id', extensions=None), is_container='list', yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)""",
      })
    self.__port = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_port(self):
    # Restore the default (empty) port list wrapper.
    self.__port = YANGDynClass(base=YANGListType("port_id",yc_port_topology__topology_node_port, yang_name="port", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-id', extensions=None), is_container='list', yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
  # Public accessors and the ordered element registry.
  node_id = __builtin__.property(_get_node_id, _set_node_id)
  port = __builtin__.property(_get_port, _set_port)
  _pyangbind_elements = OrderedDict([('node_id', node_id), ('port', port), ])
class yc_link_topology__topology_link(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module topology - based on the path /topology/link. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # List entry for /topology/link: key leaf 'link-id' plus four leafref
  # leaves (source/target node and port).  Generated code — edits will be
  # lost on regeneration.
  __slots__ = ('_path_helper', '_extmethods', '__link_id','__source_node','__target_node','__source_port','__target_port',)
  _yang_name = 'link'
  _yang_namespace = 'urn:topology'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Create the container with default (unset) leaf wrappers, then
    optionally copy values from a single positional source object."""
    self._path_helper = False
    self._extmethods = False
    self.__link_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="link-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
    self.__source_node = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="source-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    self.__target_node = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="target-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    self.__source_port = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    self.__target_port = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="target-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      # Copy-constructor path: the source must expose every element in
      # _pyangbind_elements; only changed values are copied, via setters.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        # 'load' is forwarded so key leaves may be populated during loads.
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path of this node as a list of element names; delegates to
    # the parent when this instance is attached to a tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['topology', 'link']
  def _get_link_id(self):
    """
    Getter method for link_id, mapped from YANG variable /topology/link/link_id (string)
    """
    return self.__link_id
  def _set_link_id(self, v, load=False):
    """
    Setter method for link_id, mapped from YANG variable /topology/link/link_id (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link_id() directly.
    """
    # link-id is the list key: once this entry lives inside an
    # instantiated list (has a parent) it may only be set with load=True.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                           " within an instantiated list")
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="link-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """link_id must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="link-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)""",
      })
    self.__link_id = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_link_id(self):
    # Restore the default (unset) wrapper for the link-id key leaf.
    self.__link_id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="link-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:topology', defining_module='topology', yang_type='string', is_config=True)
  # The four leafref leaves below follow an identical get/set/unset
  # pattern; the leafref target resolution happens in the pyangbind
  # runtime, the binding stores a plain text value.
  def _get_source_node(self):
    """
    Getter method for source_node, mapped from YANG variable /topology/link/source_node (leafref)
    """
    return self.__source_node
  def _set_source_node(self, v, load=False):
    """
    Setter method for source_node, mapped from YANG variable /topology/link/source_node (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_source_node is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_source_node() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="source-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """source_node must be of a type compatible with leafref""",
        'defined-type': "leafref",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="source-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)""",
      })
    self.__source_node = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_source_node(self):
    self.__source_node = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="source-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
  def _get_target_node(self):
    """
    Getter method for target_node, mapped from YANG variable /topology/link/target_node (leafref)
    """
    return self.__target_node
  def _set_target_node(self, v, load=False):
    """
    Setter method for target_node, mapped from YANG variable /topology/link/target_node (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_target_node is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_target_node() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="target-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """target_node must be of a type compatible with leafref""",
        'defined-type': "leafref",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="target-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)""",
      })
    self.__target_node = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_target_node(self):
    self.__target_node = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="target-node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
  def _get_source_port(self):
    """
    Getter method for source_port, mapped from YANG variable /topology/link/source_port (leafref)
    """
    return self.__source_port
  def _set_source_port(self, v, load=False):
    """
    Setter method for source_port, mapped from YANG variable /topology/link/source_port (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_source_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_source_port() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """source_port must be of a type compatible with leafref""",
        'defined-type': "leafref",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)""",
      })
    self.__source_port = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_source_port(self):
    self.__source_port = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
  def _get_target_port(self):
    """
    Getter method for target_port, mapped from YANG variable /topology/link/target_port (leafref)
    """
    return self.__target_port
  def _set_target_port(self, v, load=False):
    """
    Setter method for target_port, mapped from YANG variable /topology/link/target_port (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_target_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_target_port() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="target-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """target_port must be of a type compatible with leafref""",
        'defined-type': "leafref",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="target-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)""",
      })
    self.__target_port = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_target_port(self):
    self.__target_port = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="target-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:topology', defining_module='topology', yang_type='leafref', is_config=True)
  # Public accessors and the ordered element registry.
  link_id = __builtin__.property(_get_link_id, _set_link_id)
  source_node = __builtin__.property(_get_source_node, _set_source_node)
  target_node = __builtin__.property(_get_target_node, _set_target_node)
  source_port = __builtin__.property(_get_source_port, _set_source_port)
  target_port = __builtin__.property(_get_target_port, _set_target_port)
  _pyangbind_elements = OrderedDict([('link_id', link_id), ('source_node', source_node), ('target_node', target_node), ('source_port', source_port), ('target_port', target_port), ])
class yc_topology_topology__topology(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module topology - based on the path /topology. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # /topology container: holds the 'node' list (keyed by node-id) and the
  # 'link' list (keyed by link-id).  Generated code — edits will be lost
  # on regeneration.
  __slots__ = ('_path_helper', '_extmethods', '__node','__link',)
  _yang_name = 'topology'
  _yang_namespace = 'urn:topology'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Create the container with default (empty) list wrappers, then
    optionally copy values from a single positional source object."""
    self._path_helper = False
    self._extmethods = False
    self.__node = YANGDynClass(base=YANGListType("node_id",yc_node_topology__topology_node, yang_name="node", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='node-id', extensions=None), is_container='list', yang_name="node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
    self.__link = YANGDynClass(base=YANGListType("link_id",yc_link_topology__topology_link, yang_name="link", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='link-id', extensions=None), is_container='list', yang_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      # Copy-constructor path: the source must expose every element in
      # _pyangbind_elements; only changed values are copied, via setters.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        # 'load' is forwarded so key leaves may be populated during loads.
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path of this node as a list of element names; delegates to
    # the parent when this instance is attached to a tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['topology']
  def _get_node(self):
    """
    Getter method for node, mapped from YANG variable /topology/node (list)
    """
    return self.__node
  def _set_node(self, v, load=False):
    """
    Setter method for node, mapped from YANG variable /topology/node (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_node is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_node() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce into a keyed YANG list of node entries ('node-id' key).
      t = YANGDynClass(v,base=YANGListType("node_id",yc_node_topology__topology_node, yang_name="node", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='node-id', extensions=None), is_container='list', yang_name="node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """node must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("node_id",yc_node_topology__topology_node, yang_name="node", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='node-id', extensions=None), is_container='list', yang_name="node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)""",
      })
    self.__node = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_node(self):
    # Restore the default (empty) node list wrapper.
    self.__node = YANGDynClass(base=YANGListType("node_id",yc_node_topology__topology_node, yang_name="node", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='node-id', extensions=None), is_container='list', yang_name="node", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
  def _get_link(self):
    """
    Getter method for link, mapped from YANG variable /topology/link (list)
    """
    return self.__link
  def _set_link(self, v, load=False):
    """
    Setter method for link, mapped from YANG variable /topology/link (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce into a keyed YANG list of link entries ('link-id' key).
      t = YANGDynClass(v,base=YANGListType("link_id",yc_link_topology__topology_link, yang_name="link", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='link-id', extensions=None), is_container='list', yang_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """link must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("link_id",yc_link_topology__topology_link, yang_name="link", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='link-id', extensions=None), is_container='list', yang_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)""",
      })
    self.__link = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_link(self):
    # Restore the default (empty) link list wrapper.
    self.__link = YANGDynClass(base=YANGListType("link_id",yc_link_topology__topology_link, yang_name="link", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='link-id', extensions=None), is_container='list', yang_name="link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='list', is_config=True)
  # Public accessors and the ordered element registry.
  node = __builtin__.property(_get_node, _set_node)
  link = __builtin__.property(_get_link, _set_link)
  _pyangbind_elements = OrderedDict([('node', node), ('link', link), ])
class topology(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module topology - based on the path /topology. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: Basic example of network topology
  """
  # Module root: wraps the single top-level /topology container.
  # Generated code — edits will be lost on regeneration.
  __slots__ = ('_path_helper', '_extmethods', '__topology',)
  _yang_name = 'topology'
  _yang_namespace = 'urn:topology'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Create the root with a default (empty) topology container, then
    optionally copy values from a single positional source object."""
    self._path_helper = False
    self._extmethods = False
    self.__topology = YANGDynClass(base=yc_topology_topology__topology, is_container='container', yang_name="topology", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='container', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      # Copy-constructor path: the source must expose every element in
      # _pyangbind_elements; only changed values are copied, via setters.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        # 'load' is forwarded so key leaves may be populated during loads.
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Module root has an empty path unless attached to a parent tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return []
  def _get_topology(self):
    """
    Getter method for topology, mapped from YANG variable /topology (container)
    """
    return self.__topology
  def _set_topology(self, v, load=False):
    """
    Setter method for topology, mapped from YANG variable /topology (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_topology is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_topology() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=yc_topology_topology__topology, is_container='container', yang_name="topology", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """topology must be of a type compatible with container""",
        'defined-type': "container",
        'generated-type': """YANGDynClass(base=yc_topology_topology__topology, is_container='container', yang_name="topology", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='container', is_config=True)""",
      })
    self.__topology = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_topology(self):
    # Restore the default (empty) topology container wrapper.
    self.__topology = YANGDynClass(base=yc_topology_topology__topology, is_container='container', yang_name="topology", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:topology', defining_module='topology', yang_type='container', is_config=True)
  # Public accessor (intentionally shadows the class name inside the class
  # body) and the ordered element registry.
  topology = __builtin__.property(_get_topology, _set_topology)
  _pyangbind_elements = OrderedDict([('topology', topology), ])
| 53.022378
| 493
| 0.714357
| 5,110
| 37,911
| 5.005088
| 0.037182
| 0.050829
| 0.061855
| 0.042227
| 0.918596
| 0.898538
| 0.895566
| 0.89107
| 0.882077
| 0.875196
| 0
| 0.001107
| 0.165968
| 37,911
| 714
| 494
| 53.096639
| 0.807774
| 0.161563
| 0
| 0.656818
| 0
| 0.027273
| 0.261022
| 0.073757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104545
| false
| 0
| 0.034091
| 0
| 0.284091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
421580dce2736cfde8f3d4c754f0cb4b7e86802f
| 56
|
py
|
Python
|
once-for-all-GM/filereader/__init__.py
|
skhu101/GM-NAS
|
cf2c8f8201690d929ec1d286c7f7cbc53e76012b
|
[
"MIT"
] | 4
|
2022-03-17T09:06:42.000Z
|
2022-03-24T02:38:01.000Z
|
once-for-all-GM/filereader/__init__.py
|
skhu101/GM-NAS
|
cf2c8f8201690d929ec1d286c7f7cbc53e76012b
|
[
"MIT"
] | null | null | null |
once-for-all-GM/filereader/__init__.py
|
skhu101/GM-NAS
|
cf2c8f8201690d929ec1d286c7f7cbc53e76012b
|
[
"MIT"
] | null | null | null |
from .direct_reader import *
from .lmdb_reader import *
| 18.666667
| 28
| 0.785714
| 8
| 56
| 5.25
| 0.625
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 29
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
421a22ecbf36db7ef977c1b87ebae6ce0d646b65
| 10,948
|
py
|
Python
|
psec/pinblock.py
|
knovichikhin/psec
|
6b549c650cde21191539cdfc6a9d95e99f9268fc
|
[
"MIT"
] | 7
|
2021-07-04T13:56:58.000Z
|
2022-02-22T10:46:02.000Z
|
psec/pinblock.py
|
knovichikhin/psec
|
6b549c650cde21191539cdfc6a9d95e99f9268fc
|
[
"MIT"
] | 2
|
2021-06-04T02:25:36.000Z
|
2021-08-08T17:21:44.000Z
|
psec/pinblock.py
|
knovichikhin/psec
|
6b549c650cde21191539cdfc6a9d95e99f9268fc
|
[
"MIT"
] | null | null | null |
r"""PIN blocks are data blocks that contain PIN, pad characters and sometimes
other additional information, such as the length of the PIN.
"""
import binascii as _binascii
import secrets as _secrets
from psec import tools as _tools
__all__ = [
"encode_pinblock_iso_0",
"encode_pinblock_iso_2",
"encode_pinblock_iso_3",
"decode_pinblock_iso_0",
"decode_pinblock_iso_2",
"decode_pinblock_iso_3",
]
def encode_pinblock_iso_0(pin: str, pan: str) -> bytes:
    r"""Encode ISO 9564 PIN block format 0 aka ANSI PIN block.

    ISO format 0 PIN block is an 8 byte value that consists of

        - Control field. A 4 bit hex value set to 0.
        - PIN length. A 4 bit hex value in the range from 4 to C.
        - PIN digits. Each digit is a 4 bit hex value in the range from 0 to 9.
        - Pad character. A 4 bit hex value set to F.

    The PIN block is then XOR'd by an ANSI PAN block that consists of

        - 4 pad characters. Each is 4 bit hex value set to 0.
        - 12 rightmost digits of the PAN excluding the check digit.

    Parameters
    ----------
    pin : str
        ASCII Personal Identification Number.
    pan : str
        ASCII Personal Account Number.

    Returns
    -------
    pinblock : bytes
        Binary 8-byte PIN block.

    Raises
    ------
    ValueError
        PIN must be between 4 and 12 digits long
        PAN must be at least 13 digits long

    Examples
    --------
    >>> from psec.pinblock import encode_pinblock_iso_0
    >>> encode_pinblock_iso_0("1234", "5544332211009966").hex().upper()
    '041277CDDEEFF669'
    """
    if not (4 <= len(pin) <= 12) or not _tools.ascii_numeric(pin):
        raise ValueError("PIN must be between 4 and 12 digits long")

    if len(pan) < 13 or not _tools.ascii_numeric(pan):
        raise ValueError("PAN must be at least 13 digits long")

    # Byte 0 carries control nibble 0 and the PIN length nibble; the PIN is
    # right-padded with F to 14 hex digits (bytes 1-7).
    pin_field = bytes([len(pin)]) + _binascii.a2b_hex(pin.ljust(14, "F"))

    # ANSI PAN block: two zero bytes, then the 12 rightmost PAN digits
    # excluding the check digit.
    pan_field = b"\x00\x00" + _binascii.a2b_hex(pan[-13:-1])

    return _tools.xor(pin_field, pan_field)
def encode_pinblock_iso_2(pin: str) -> bytes:
    r"""Encode ISO 9564 PIN block format 2.

    An ISO format 2 PIN block is an 8-byte value consisting of

        - Control field: a 4-bit hex value set to 2.
        - PIN length: a 4-bit hex value in the range from 4 to C.
        - PIN digits: each digit is a 4-bit hex value from 0 to 9.
        - Pad characters: 4-bit hex values set to F.

    Parameters
    ----------
    pin : str
        ASCII Personal Identification Number.

    Returns
    -------
    pinblock : bytes
        Binary 8-byte PIN block.

    Raises
    ------
    ValueError
        PIN must be between 4 and 12 digits long

    Examples
    --------
    >>> from psec.pinblock import encode_pinblock_iso_2
    >>> encode_pinblock_iso_2("1234").hex().upper()
    '241234FFFFFFFFFF'
    """
    if not (4 <= len(pin) <= 12) or not _tools.ascii_numeric(pin):
        raise ValueError("PIN must be between 4 and 12 digits long")

    # First byte packs the control nibble (2) with the PIN length nibble;
    # the remaining 7 bytes are the PIN right-padded with "F" to 14 nibbles.
    header = (0x20 | len(pin)).to_bytes(1, "big")
    return header + _binascii.a2b_hex(pin.ljust(14, "F"))
def encode_pinblock_iso_3(pin: str, pan: str) -> bytes:
    r"""Encode ISO 9564 PIN block format 3.

    An ISO format 3 PIN block is an 8-byte value consisting of

        - Control field: a 4-bit hex value set to 3.
        - PIN length: a 4-bit hex value in the range from 4 to C.
        - PIN digits: each digit is a 4-bit hex value from 0 to 9.
        - Random pad characters: 4-bit hex values in the range from A to F.

    The result is XOR'd with an ANSI PAN block consisting of

        - 4 pad characters, each a 4-bit hex value set to 0.
        - The 12 rightmost PAN digits, excluding the check digit.

    Parameters
    ----------
    pin : str
        ASCII Personal Identification Number.
    pan : str
        ASCII Personal Account Number.

    Returns
    -------
    pinblock : bytes
        Binary 8-byte PIN block.

    Raises
    ------
    ValueError
        PIN must be between 4 and 12 digits long
        PAN must be at least 13 digits long

    Examples
    --------
    >>> from psec.pinblock import encode_pinblock_iso_3
    >>> encode_pinblock_iso_3("1234", "5544332211009966").hex().upper()[:6]
    '341277'
    """
    if not (4 <= len(pin) <= 12) or not _tools.ascii_numeric(pin):
        raise ValueError("PIN must be between 4 and 12 digits long")

    if len(pan) < 13 or not _tools.ascii_numeric(pan):
        raise ValueError("PAN must be at least 13 digits long")

    # Cryptographically random A-F filler brings the PIN up to 14 nibbles.
    filler = "".join(_secrets.choice("ABCDEF") for _ in range(14 - len(pin)))
    pin_field = (0x30 | len(pin)).to_bytes(1, "big") + _binascii.a2b_hex(pin + filler)

    # Two zero bytes followed by the 12 PAN digits preceding the check digit.
    pan_field = b"\x00\x00" + _binascii.a2b_hex(pan[-13:-1])

    return _tools.xor(pin_field, pan_field)
def decode_pinblock_iso_0(pinblock: bytes, pan: str) -> str:
    r"""Decode ISO 9564 PIN block format 0 aka ANSI PIN block.

    ISO format 0 PIN block is an 8 byte value that consists of

        - Control field. A 4 bit hex value set to 0.
        - PIN length. A 4 bit hex value in the range from 4 to C.
        - PIN digits. Each digit is a 4 bit hex value in the range from 0 to 9.
        - Pad character. A 4 bit hex value set to F.

    The PIN block is XOR'd by an ANSI PAN block that consists of

        - 4 pad characters. Each is 4 bit hex value set to 0.
        - 12 rightmost digits of the PAN excluding the check digit.

    Parameters
    ----------
    pinblock : bytes
        Binary 8-byte PIN block.
    pan : str
        ASCII Personal Account Number.

    Returns
    -------
    pin : str
        ASCII Personal Identification Number.

    Raises
    ------
    ValueError
        PAN must be at least 13 digits long
        PIN block must be 8 bytes long
        PIN block is not ISO format 0: control field `X`
        PIN length must be between 4 and 12: `pin_len`
        PIN block filler is incorrect: `filler`
        PIN is not numeric: `pin`

    Examples
    --------
    >>> from psec.pinblock import decode_pinblock_iso_0
    >>> decode_pinblock_iso_0(
    ...     bytes.fromhex("041277CDDEEFF669"),
    ...     "5544332211009966")
    '1234'
    """
    if len(pan) < 13 or not _tools.ascii_numeric(pan):
        raise ValueError("PAN must be at least 13 digits long")
    if len(pinblock) != 8:
        raise ValueError("PIN block must be 8 bytes long")
    # Undo the PAN XOR: rebuild the ANSI PAN block (two zero bytes plus the
    # 12 PAN digits preceding the check digit) and XOR it off.
    pan_block = b"\x00\x00" + _binascii.a2b_hex(pan[-13:-1])
    block = _tools.xor(pinblock, pan_block).hex().upper()
    # First nibble is the control field and must be 0 for this format.
    if block[0] != "0":
        raise ValueError(f"PIN block is not ISO format 0: control field `{block[0]}`")
    # Second nibble carries the PIN length.
    pin_len = int(block[1], 16)
    if pin_len < 4 or pin_len > 12:
        raise ValueError(f"PIN length must be between 4 and 12: `{pin_len}`")
    # Everything after the PIN digits must be "F" filler.
    if block[pin_len + 2 :] != ("F" * (14 - pin_len)):
        raise ValueError(f"PIN block filler is incorrect: `{block[pin_len + 2 :]}`")
    pin = block[2 : pin_len + 2]
    if not _tools.ascii_numeric(pin):
        raise ValueError(f"PIN is not numeric: `{pin}`")
    return pin
def decode_pinblock_iso_2(pinblock: bytes) -> str:
    r"""Decode ISO 9564 PIN block format 2.

    An ISO format 2 PIN block is an 8-byte value consisting of

        - Control field: a 4-bit hex value set to 2.
        - PIN length: a 4-bit hex value in the range from 4 to C.
        - PIN digits: each digit is a 4-bit hex value from 0 to 9.
        - Pad characters: 4-bit hex values set to F.

    Parameters
    ----------
    pinblock : bytes
        Binary 8-byte PIN block.

    Returns
    -------
    pin : str
        ASCII Personal Identification Number.

    Raises
    ------
    ValueError
        PIN block must be 8 bytes long
        PIN block is not ISO format 2: control field `X`
        PIN block filler is incorrect: `filler`
        PIN is not numeric: `pin`

    Examples
    --------
    >>> from psec.pinblock import decode_pinblock_iso_2
    >>> decode_pinblock_iso_2(bytes.fromhex("2C123456789012FF"))
    '123456789012'
    """
    if len(pinblock) != 8:
        raise ValueError("PIN block must be 8 bytes long")

    nibbles = pinblock.hex().upper()
    control, length_nibble, body = nibbles[0], nibbles[1], nibbles[2:]

    if control != "2":
        raise ValueError(f"PIN block is not ISO format 2: control field `{control}`")

    pin_len = int(length_nibble, 16)
    if pin_len < 4 or pin_len > 12:
        raise ValueError(f"PIN length must be between 4 and 12: `{pin_len}`")

    # Everything after the PIN digits must be "F" filler.
    filler = body[pin_len:]
    if filler != "F" * (14 - pin_len):
        raise ValueError(f"PIN block filler is incorrect: `{filler}`")

    pin = body[:pin_len]
    if not _tools.ascii_numeric(pin):
        raise ValueError(f"PIN is not numeric: `{pin}`")
    return pin
def decode_pinblock_iso_3(pinblock: bytes, pan: str) -> str:
    r"""Decode ISO 9564 PIN block format 3.

    ISO format 3 PIN block is an 8 byte value that consists of

        - Control field. A 4 bit hex value set to 3.
        - PIN length. A 4 bit hex value in the range from 4 to C.
        - PIN digits. Each digit is a 4 bit hex value in the range from 0 to 9.
        - Random pad character. A 4 bit hex value in the range from A to F.

    The PIN block is XOR'd by an ANSI PAN block that consists of

        - 4 pad characters. Each is 4 bit hex value set to 0.
        - 12 rightmost digits of the PAN excluding the check digit.

    Parameters
    ----------
    pinblock : bytes
        Binary 8-byte PIN block.
    pan : str
        ASCII Personal Account Number.

    Returns
    -------
    pin : str
        ASCII Personal Identification Number.

    Raises
    ------
    ValueError
        PAN must be at least 13 digits long
        PIN block must be 8 bytes long
        PIN block is not ISO format 3: control field `X`
        PIN length must be between 4 and 12: `pin_len`
        PIN block filler is incorrect: `filler`
        PIN is not numeric: `pin`

    Examples
    --------
    >>> from psec.pinblock import decode_pinblock_iso_3
    >>> decode_pinblock_iso_3(
    ...     bytes.fromhex("341277EEEFCCB43C"),
    ...     "5544332211009966")
    '1234'
    """
    if len(pan) < 13 or not _tools.ascii_numeric(pan):
        raise ValueError("PAN must be at least 13 digits long")
    if len(pinblock) != 8:
        raise ValueError("PIN block must be 8 bytes long")
    # Undo the PAN XOR before inspecting the nibbles.
    pan_block = b"\x00\x00" + _binascii.a2b_hex(pan[-13:-1])
    block = _tools.xor(pinblock, pan_block).hex().upper()
    # First nibble is the control field and must be 3 for this format.
    if block[0] != "3":
        raise ValueError(f"PIN block is not ISO format 3: control field `{block[0]}`")
    # Second nibble carries the PIN length.
    pin_len = int(block[1], 16)
    if pin_len < 4 or pin_len > 12:
        raise ValueError(f"PIN length must be between 4 and 12: `{pin_len}`")
    # Format 3 filler is random but must stay within A-F, so only a
    # membership check is possible (unlike formats 0 and 2).
    if not set(block[pin_len + 2 :]).issubset(frozenset("ABCDEF")):
        raise ValueError(f"PIN block filler is incorrect: `{block[pin_len + 2 :]}`")
    pin = block[2 : pin_len + 2]
    if not _tools.ascii_numeric(pin):
        raise ValueError(f"PIN is not numeric: `{pin}`")
    return pin
| 29.831063
| 86
| 0.61783
| 1,677
| 10,948
| 3.940966
| 0.079308
| 0.056892
| 0.028597
| 0.049024
| 0.898774
| 0.896353
| 0.893479
| 0.860039
| 0.847935
| 0.82539
| 0
| 0.057661
| 0.279229
| 10,948
| 366
| 87
| 29.912568
| 0.779876
| 0.532609
| 0
| 0.583333
| 0
| 0
| 0.2508
| 0.028807
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.03125
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
421fbcb5967e7cf13303482ef787f21d1c91c936
| 262
|
py
|
Python
|
webempresa/core/views.py
|
EduardoPerez5J/web-empresa-curso-django2
|
0d8f395a1a51fdb7e7a35d43aaaeca82d60f6827
|
[
"Unlicense"
] | null | null | null |
webempresa/core/views.py
|
EduardoPerez5J/web-empresa-curso-django2
|
0d8f395a1a51fdb7e7a35d43aaaeca82d60f6827
|
[
"Unlicense"
] | null | null | null |
webempresa/core/views.py
|
EduardoPerez5J/web-empresa-curso-django2
|
0d8f395a1a51fdb7e7a35d43aaaeca82d60f6827
|
[
"Unlicense"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the site home page (core/home.html)."""
    # PEP8: no space between the function name and its parameter list.
    return render(request, "core/home.html")
def about(request):
    """Render the about page (core/about.html)."""
    # PEP8: no space between the function name and its parameter list.
    return render(request, "core/about.html")
def store(request):
    """Render the store page (core/store.html)."""
    # PEP8: no space between the function name and its parameter list.
    return render(request, "core/store.html")
| 21.833333
| 44
| 0.721374
| 36
| 262
| 5.25
| 0.472222
| 0.206349
| 0.301587
| 0.412698
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 262
| 12
| 45
| 21.833333
| 0.851351
| 0.087786
| 0
| 0
| 0
| 0
| 0.184874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c40affcd56ecdf84e9f4b4e4118d1e313a917d71
| 125
|
py
|
Python
|
apps/__init__.py
|
zshengsheng/Swallow
|
dd5098e241c5b4e50e53abbb105fd45323abf4d5
|
[
"MIT"
] | 10
|
2018-03-18T14:22:31.000Z
|
2019-03-18T03:13:40.000Z
|
apps/__init__.py
|
zshengsheng/Swallow
|
dd5098e241c5b4e50e53abbb105fd45323abf4d5
|
[
"MIT"
] | null | null | null |
apps/__init__.py
|
zshengsheng/Swallow
|
dd5098e241c5b4e50e53abbb105fd45323abf4d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/22 15:50
# @Author : ZJJ
# @Email : 597105373@qq.com
| 25
| 29
| 0.544
| 19
| 125
| 3.578947
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 0.232
| 125
| 5
| 30
| 25
| 0.489583
| 0.896
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c40df273c19747a74924fff58fe22f8d8bb40916
| 3,424
|
py
|
Python
|
TestPicPosAdjust.py
|
neohope/NeoDemosImageProcess
|
ff08fb110464fef3433d74500792894408e26051
|
[
"BSD-3-Clause"
] | null | null | null |
TestPicPosAdjust.py
|
neohope/NeoDemosImageProcess
|
ff08fb110464fef3433d74500792894408e26051
|
[
"BSD-3-Clause"
] | null | null | null |
TestPicPosAdjust.py
|
neohope/NeoDemosImageProcess
|
ff08fb110464fef3433d74500792894408e26051
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
def adjust(img, height, width):
    """Rectify a photographed A4 sheet via Hough lines + perspective warp.

    Parameters
    ----------
    img : numpy.ndarray
        BGR source image as loaded by ``cv2.imread``.
    height, width : int
        Size of the rectified output image.

    Side effects: prints detected line endpoints, draws the detected lines
    onto *img*, and shows the intermediate stages with matplotlib.
    """
    # Image dimensions (currently unused, kept for reference).
    rows, cols = img.shape[:2]

    # Gaussian-blur the source image to suppress noise.
    img_gaus = cv2.GaussianBlur(img, (3, 3), 0)

    # Convert to grayscale.
    gray = cv2.cvtColor(img_gaus, cv2.COLOR_BGR2GRAY)

    # Edge detection (extract the image's edge information).
    edges = cv2.Canny(gray, 40, 250, apertureSize=3)
    # cv2.imwrite("out/canny.jpg", edges)

    kernel = np.ones((3, 3), np.uint8)
    expansion = cv2.dilate(edges, kernel, iterations=1)

    # Detect the A4 paper edges with the probabilistic Hough transform.
    lines = cv2.HoughLinesP(expansion, 1, np.pi / 180, 50,
                            minLineLength=60, maxLineGap=10)

    # Print both endpoints of the first two detected lines (the four corner
    # points).  BUGFIX: the original `print(x1,y1),(x2,y2)` was a Python 2
    # leftover — in Python 3 it printed only the first point and discarded
    # the second tuple.
    for x1, y1, x2, y2 in lines[0]:
        print((x1, y1), (x2, y2))
    for x1, y1, x2, y2 in lines[1]:
        print((x1, y1), (x2, y2))

    # Draw the detected edges.
    for x in range(0, 3, 2):
        for x1, y1, x2, y2 in lines[x]:
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)

    x1, y1, x2, y2 = lines[0][0]
    x3, y3, x4, y4 = lines[2][0]

    # Build the perspective-transform matrix from the four corner points.
    pos1 = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
    pos2 = np.float32([[0, 0], [0, height], [width, 0], [width, height]])
    M = cv2.getPerspectiveTransform(pos1, pos2)

    # Apply the perspective warp.
    result = cv2.warpPerspective(img, M, (width, height))

    # Display the intermediate stages.
    images = [img, img_gaus, gray, edges, result]
    titles = ['img', 'img_gaus', 'gray', 'edges', 'result']
    for i in range(5):
        plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
        plt.title(titles[i])
        plt.xticks([]), plt.yticks([])
    plt.show()

    cv2.waitKey(0)
    cv2.destroyAllWindows()
def adjustPhone(img, height, width):
    """Rectify a photographed phone via Hough lines + perspective warp.

    Same pipeline as :func:`adjust` but tuned for phone photos: a larger
    blur kernel (5x5), two dilation iterations, and the first two Hough
    lines are used as the corner sources.

    Parameters
    ----------
    img : numpy.ndarray
        BGR source image as loaded by ``cv2.imread``.
    height, width : int
        Size of the rectified output image.
    """
    # Image dimensions (currently unused, kept for reference).
    rows, cols = img.shape[:2]

    # Gaussian-blur the source image to suppress noise.
    img_gaus = cv2.GaussianBlur(img, (5, 5), 0)

    # Convert to grayscale.
    gray = cv2.cvtColor(img_gaus, cv2.COLOR_BGR2GRAY)

    # Edge detection (extract the image's edge information).
    edges = cv2.Canny(gray, 40, 250, apertureSize=3)
    # cv2.imwrite("out/canny.jpg", edges)

    kernel = np.ones((3, 3), np.uint8)
    expansion = cv2.dilate(edges, kernel, iterations=2)

    # Detect the object edges with the probabilistic Hough transform.
    lines = cv2.HoughLinesP(expansion, 1, np.pi / 180, 50,
                            minLineLength=60, maxLineGap=10)

    # Print both endpoints of the first two detected lines (the four corner
    # points).  BUGFIX: the original `print(x1,y1),(x2,y2)` was a Python 2
    # leftover — in Python 3 it printed only the first point and discarded
    # the second tuple.
    for x1, y1, x2, y2 in lines[0]:
        print((x1, y1), (x2, y2))
    for x1, y1, x2, y2 in lines[1]:
        print((x1, y1), (x2, y2))

    # Draw the detected edges.
    for x in range(0, 2):
        for x1, y1, x2, y2 in lines[x]:
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)

    print(lines[0][0])

    # Build the perspective-transform matrix from the four corner points.
    x1, y1, x2, y2 = lines[0][0]
    x3, y3, x4, y4 = lines[1][0]
    pos1 = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
    pos2 = np.float32([[0, 0], [0, height], [width, 0], [width, height]])
    M = cv2.getPerspectiveTransform(pos1, pos2)

    # Apply the perspective warp.
    result = cv2.warpPerspective(img, M, (width, height))

    # Display the intermediate stages.
    images = [img, img_gaus, gray, edges, expansion, result]
    titles = ['img', 'img_gaus', 'gray', 'edges', 'expansion', 'result']
    for i in range(6):
        plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
        plt.title(titles[i])
        plt.xticks([]), plt.yticks([])
    plt.show()

    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Paper example (disabled):
    # img = cv2.imread("images/Paper_297X511.png")
    # adjust(img, 272, 190)

    # Rectify the phone photos to a 250x130 output.
    img = cv2.imread("images/Phone1_400X533.jpg")
    adjustPhone(img, 250, 130)
    # need adjust
    img = cv2.imread("images/Phone_400X533.jpg")
    adjustPhone(img, 250, 130)
| 25.939394
| 84
| 0.589661
| 506
| 3,424
| 3.948617
| 0.229249
| 0.032032
| 0.048048
| 0.064064
| 0.865866
| 0.854855
| 0.821822
| 0.778779
| 0.778779
| 0.778779
| 0
| 0.099583
| 0.22868
| 3,424
| 131
| 85
| 26.137405
| 0.656948
| 0.109229
| 0
| 0.666667
| 0
| 0
| 0.04168
| 0.016209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.043478
| 0
| 0.072464
| 0.072464
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4673447874778e24c2a53a8f3f6d64f0a47c579
| 36
|
py
|
Python
|
mod.py
|
kingRovo/PythonCodingChalenge
|
b62938592df10ccafec9930b69c14c778e19ad37
|
[
"bzip2-1.0.6"
] | 1
|
2021-08-02T16:52:55.000Z
|
2021-08-02T16:52:55.000Z
|
mod.py
|
kingRovo/PythonCodingChalenge
|
b62938592df10ccafec9930b69c14c778e19ad37
|
[
"bzip2-1.0.6"
] | null | null | null |
mod.py
|
kingRovo/PythonCodingChalenge
|
b62938592df10ccafec9930b69c14c778e19ad37
|
[
"bzip2-1.0.6"
] | null | null | null |
# Demonstrates importing a sibling module: pyFun.add is defined in pyFun.py
# (not visible here) — presumably returns the sum of its arguments; verify
# against pyFun.py.
import pyFun

print(pyFun.add(3,4))
| 9
| 21
| 0.722222
| 7
| 36
| 3.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.111111
| 36
| 3
| 22
| 12
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
6734ad798312b6faf772a6733081c2e6688909c8
| 504
|
py
|
Python
|
keract/__init__.py
|
najiji/keract
|
16125c286e1539650c6d2283cca7b1523bf4d765
|
[
"MIT"
] | null | null | null |
keract/__init__.py
|
najiji/keract
|
16125c286e1539650c6d2283cca7b1523bf4d765
|
[
"MIT"
] | null | null | null |
keract/__init__.py
|
najiji/keract
|
16125c286e1539650c6d2283cca7b1523bf4d765
|
[
"MIT"
] | null | null | null |
from keract.keract import display_activations # noqa
from keract.keract import display_gradients_of_trainable_weights # noqa
from keract.keract import display_heatmaps # noqa
from keract.keract import get_activations # noqa
from keract.keract import get_gradients_of_activations # noqa
from keract.keract import get_gradients_of_trainable_weights # noqa
from keract.keract import load_activations_from_json_file # noqa
from keract.keract import persist_to_json_file # noqa
__version__ = '3.0.1'
| 45.818182
| 72
| 0.839286
| 73
| 504
| 5.452055
| 0.287671
| 0.201005
| 0.321608
| 0.442211
| 0.834171
| 0.69598
| 0.494975
| 0.494975
| 0.494975
| 0
| 0
| 0.006757
| 0.119048
| 504
| 10
| 73
| 50.4
| 0.88964
| 0.077381
| 0
| 0
| 0
| 0
| 0.010965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.888889
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6744335220aa89282c90f8ffe13e1eebb30f6078
| 24
|
py
|
Python
|
eisen/utils/artifacts/__init__.py
|
dasturge/eisen-core
|
09056f1e6aff450ef402b35b10ef96a7d4a3ff87
|
[
"MIT"
] | null | null | null |
eisen/utils/artifacts/__init__.py
|
dasturge/eisen-core
|
09056f1e6aff450ef402b35b10ef96a7d4a3ff87
|
[
"MIT"
] | null | null | null |
eisen/utils/artifacts/__init__.py
|
dasturge/eisen-core
|
09056f1e6aff450ef402b35b10ef96a7d4a3ff87
|
[
"MIT"
] | null | null | null |
from .savemodel import *
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6761560815c5ddb35a71ee0dd7e5d323884ea5a9
| 2,409
|
py
|
Python
|
asinstaller/partitions/auto/gpt.py
|
ArchStrike/archstrike-installer
|
2f271c873794ceb7bb86613af8c1f84323b71ba3
|
[
"MIT"
] | 8
|
2016-09-16T10:12:41.000Z
|
2021-07-07T21:38:46.000Z
|
asinstaller/partitions/auto/gpt.py
|
ArchStrike/archstrike-installer
|
2f271c873794ceb7bb86613af8c1f84323b71ba3
|
[
"MIT"
] | 13
|
2016-07-16T22:23:04.000Z
|
2020-12-01T01:57:36.000Z
|
asinstaller/partitions/auto/gpt.py
|
ArchStrike/archstrike-installer
|
2f271c873794ceb7bb86613af8c1f84323b71ba3
|
[
"MIT"
] | 5
|
2017-05-17T18:53:17.000Z
|
2019-09-18T21:31:16.000Z
|
from asinstaller.utils import system, system_output
from asinstaller.config import usr_cfg, get_logger
__all__ = ["uefi", "non_uefi"]
logger = get_logger(__name__)
def uefi():
    """Auto-partition ``usr_cfg['drive']`` with a GPT layout for UEFI boot.

    Pipes a canned keystroke script into ``gdisk``: a 512M EFI system
    partition (gdisk type ef00), optionally a swap partition (type 8200)
    sized from ``usr_cfg['swap_space']``, and a root partition from the
    remaining space.  Device names are then scraped from ``fdisk -l``
    output and stored in ``usr_cfg['swap']``, ``usr_cfg['boot']`` and
    ``usr_cfg['root']``.

    NOTE(review): the ``NR==N`` awk selectors assume a fixed ``fdisk -l``
    line order for the target drive (drive line first, partitions in
    order) — verify against the fdisk version shipped with the installer.
    """
    if usr_cfg['swap_space']:
        # gdisk script: new 512M ef00 (EFI), new partition 3 of the
        # requested swap size (type 8200), then a last partition with
        # defaults, write, confirm.
        system('echo -e "n\n\n\n512M\nef00\nn\n3\n\n'
               f'+{usr_cfg["swap_space"]}\n8200\nn\n\n'
               f'\n\n\nw\ny" | gdisk {usr_cfg["drive"]}')
        # substr($1,6) strips the leading "/dev/" from the device column.
        SWAP = system_output("fdisk -l | "
                             f" grep {usr_cfg['drive'][-3:]}"
                             " | awk '{ if (NR==4) print substr ($1,6) }'")
        system("wipefs -afq /dev/{0}".format(SWAP))
        system("mkswap /dev/{0}".format(SWAP))
        system("swapon /dev/{0}".format(SWAP))
        usr_cfg['swap'] = SWAP
    else:
        # No swap requested: EFI partition plus a single root partition.
        system('echo -e "n\n\n\n512M\nef00\nn\n\n\n\n\nw\ny" | '
               f'gdisk {usr_cfg["drive"]}')
    usr_cfg['boot'] = system_output("fdisk -l | "
                                    f"grep {usr_cfg['drive'][-3:]} | "
                                    "awk '{ if (NR==2) print substr ($1,6) }' ")
    usr_cfg['root'] = system_output("fdisk -l | "
                                    f"grep {usr_cfg['drive'][-3:]} | "
                                    "awk '{ if (NR==3) print substr ($1,6) }' ")
def non_uefi():
    """Auto-partition ``usr_cfg['drive']`` with GPT for BIOS (non-UEFI) boot.

    Pipes a canned keystroke script into ``gdisk``: fresh GPT (``o``), a
    100M boot partition (number 1), a 1M BIOS boot partition (number 2,
    gdisk type EF02), optionally a swap partition (number 4, type 8200)
    sized from ``usr_cfg['swap_space']``, and a root partition (number 3)
    from the remaining space.  Device names are then scraped from
    ``fdisk -l`` output and stored in ``usr_cfg['swap']``,
    ``usr_cfg['boot']`` and ``usr_cfg['root']``.

    NOTE(review): the ``NR==N`` awk selectors assume a fixed ``fdisk -l``
    line order (drive line first, partitions in order) — verify against
    the fdisk version shipped with the installer.
    """
    if usr_cfg['swap_space']:
        system('echo -e "o\ny\nn\n1\n\n+100M\n\nn\n2\n\n+1M\nEF02\nn\n4\n\n'
               f'+{usr_cfg["swap_space"]}\n8200\nn'
               f'\n3\n\n\n\nw\ny" | gdisk {usr_cfg["drive"]}')
        # NR==5 selects the swap device; substr($1,6) strips "/dev/".
        SWAP = system_output("fdisk -l | "
                             f" grep {usr_cfg['drive'][-3:]}"
                             " | awk '{ if (NR==5) print substr ($1,6) }'")
        system("wipefs -afq /dev/{0}".format(SWAP))
        system("mkswap /dev/{0}".format(SWAP))
        system("swapon /dev/{0}".format(SWAP))
        usr_cfg['swap'] = SWAP
    else:
        # No swap requested: boot + BIOS boot + root only.
        system('echo -e "o\ny\nn\n1\n\n+100M\n\nn\n2\n\n+1M\nEF02\nn'
               f'\n3\n\n\n\nw\ny" | gdisk {usr_cfg["drive"]}')
    usr_cfg['boot'] = system_output("fdisk -l | "
                                    f"grep {usr_cfg['drive'][-3:]} | "
                                    "awk '{ if (NR==2) print substr ($1,6) }' ")
    usr_cfg['root'] = system_output("fdisk -l | "
                                    f"grep {usr_cfg['drive'][-3:]} | "
                                    "awk '{ if (NR==4) print substr ($1,6) }' ")
| 44.611111
| 80
| 0.449979
| 326
| 2,409
| 3.190184
| 0.190184
| 0.121154
| 0.105769
| 0.103846
| 0.853846
| 0.847115
| 0.847115
| 0.847115
| 0.847115
| 0.721154
| 0
| 0.04334
| 0.348692
| 2,409
| 53
| 81
| 45.45283
| 0.619503
| 0
| 0
| 0.608696
| 0
| 0.086957
| 0.444168
| 0.152345
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.086957
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67735ad508ee9d803b9ab2caa2570306434cf65f
| 66
|
py
|
Python
|
match/models/__init__.py
|
QLMX/DeepRecall
|
832068d5effd38c6c3933e41c5b6701b700b4a83
|
[
"Apache-2.0"
] | 4
|
2021-12-01T17:04:17.000Z
|
2022-03-10T08:07:20.000Z
|
match/models/__init__.py
|
QLMX/RecMatch
|
832068d5effd38c6c3933e41c5b6701b700b4a83
|
[
"Apache-2.0"
] | null | null | null |
match/models/__init__.py
|
QLMX/RecMatch
|
832068d5effd38c6c3933e41c5b6701b700b4a83
|
[
"Apache-2.0"
] | null | null | null |
from match.models.fm import FM
from match.models.dssm import DSSM
| 22
| 34
| 0.818182
| 12
| 66
| 4.5
| 0.5
| 0.333333
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 35
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
67c058c4c937e426623046e87ebd311295eb4f1a
| 3,466
|
py
|
Python
|
tests/utils/test_iterator.py
|
spack971/kado
|
7776fd17637a1c9b7d60e7f2572fe7dfec2e7305
|
[
"MIT"
] | null | null | null |
tests/utils/test_iterator.py
|
spack971/kado
|
7776fd17637a1c9b7d60e7f2572fe7dfec2e7305
|
[
"MIT"
] | null | null | null |
tests/utils/test_iterator.py
|
spack971/kado
|
7776fd17637a1c9b7d60e7f2572fe7dfec2e7305
|
[
"MIT"
] | null | null | null |
# tests/utils/test_iterator.py
# ============================
#
# Copying
# -------
#
# Copyright (c) 2018 kado authors.
#
# This file is part of the *kado* project.
#
# kado is a free software project. You can redistribute it and/or
# modify if under the terms of the MIT License.
#
# This software project is distributed *as is*, WITHOUT WARRANTY OF ANY
# KIND; including but not limited to the WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE and NONINFRINGEMENT.
#
# You should have received a copy of the MIT License along with kado.
# If not, see <http://opensource.org/licenses/MIT>.
#
import unittest
from kado.utils import iterator
class TestXLast(unittest.TestCase):
    """Test case for :func:`kado.utils.iterator.xlast`.

    The expectations below exercise ``xlast`` as yielding every item of
    its iterable except the last one.
    """

    def test_xlast_list_empty(self):
        """Given an empty list no items should be returned."""
        TEST_DATA = []
        EXPECTED = []
        self.assertEqual(list(iterator.xlast(TEST_DATA)), EXPECTED)

    def test_xlast_list_one(self):
        """A one item list should return no item."""
        TEST_DATA = [1]
        EXPECTED = []
        self.assertEqual(list(iterator.xlast(TEST_DATA)), EXPECTED)

    def test_xlast_list_two(self):
        """A two item list should return one item."""
        TEST_DATA = [1, 2]
        EXPECTED = [1]
        self.assertEqual(list(iterator.xlast(TEST_DATA)), EXPECTED)

    def test_xlast_string_empty(self):
        """Given an empty string no items should be returned."""
        TEST_DATA = ''
        EXPECTED = ''
        self.assertEqual(''.join(iterator.xlast(TEST_DATA)), EXPECTED)

    def test_xlast_string_one(self):
        """A one character string should return no item."""
        TEST_DATA = '1'
        EXPECTED = ''
        self.assertEqual(''.join(iterator.xlast(TEST_DATA)), EXPECTED)

    def test_xlast_string_two(self):
        """A two item string should return one item."""
        TEST_DATA = '12'
        EXPECTED = '1'
        self.assertEqual(''.join(iterator.xlast(TEST_DATA)), EXPECTED)
class TestOneXLast(unittest.TestCase):
    """Test case for :func:`kado.utils.iterator.onexlast`.

    The expectations below exercise ``onexlast`` as yielding every item
    except the last, but yielding the single item of a one-item iterable.
    """

    def test_onexlast_list_empty(self):
        """Given an empty list no items should be returned."""
        TEST_DATA = []
        EXPECTED = []
        self.assertEqual(list(iterator.onexlast(TEST_DATA)), EXPECTED)

    def test_onexlast_list_one(self):
        """A one item list should return one item."""
        TEST_DATA = [1]
        EXPECTED = [1]
        self.assertEqual(list(iterator.onexlast(TEST_DATA)), EXPECTED)

    def test_onexlast_list_two(self):
        """A two item list should return one item."""
        TEST_DATA = [1, 2]
        EXPECTED = [1]
        self.assertEqual(list(iterator.onexlast(TEST_DATA)), EXPECTED)

    def test_onexlast_string_empty(self):
        """Given an empty string no items should be returned."""
        TEST_DATA = ''
        EXPECTED = ''
        self.assertEqual(''.join(iterator.onexlast(TEST_DATA)), EXPECTED)

    def test_onexlast_string_one(self):
        # Docstring fixed: the expectation is '1', i.e. one item, not none.
        """A one character string should return one item."""
        TEST_DATA = '1'
        EXPECTED = '1'
        self.assertEqual(''.join(iterator.onexlast(TEST_DATA)), EXPECTED)

    def test_onexlast_string_two(self):
        """A two item string should return one item."""
        TEST_DATA = '12'
        EXPECTED = '1'
        self.assertEqual(''.join(iterator.onexlast(TEST_DATA)), EXPECTED)
| 27.507937
| 73
| 0.635892
| 440
| 3,466
| 4.870455
| 0.220455
| 0.089594
| 0.119459
| 0.088661
| 0.75035
| 0.75035
| 0.75035
| 0.75035
| 0.740551
| 0.664956
| 0
| 0.008318
| 0.236872
| 3,466
| 125
| 74
| 27.728
| 0.80189
| 0.347375
| 0
| 0.692308
| 0
| 0
| 0.004144
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.230769
| false
| 0
| 0.038462
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
db1242c7744cae598ae68ec6ea22b9a56d0e8f7a
| 133,145
|
py
|
Python
|
code/IBM_sample_information.py
|
snifflesnrumjum/IBM_GOM
|
1e5484e429a96271297e8a3da270361671ded8dc
|
[
"MIT"
] | null | null | null |
code/IBM_sample_information.py
|
snifflesnrumjum/IBM_GOM
|
1e5484e429a96271297e8a3da270361671ded8dc
|
[
"MIT"
] | 1
|
2017-03-29T18:54:52.000Z
|
2017-03-29T18:54:52.000Z
|
code/IBM_sample_information.py
|
snifflesnrumjum/IBM_GOM
|
1e5484e429a96271297e8a3da270361671ded8dc
|
[
"MIT"
] | null | null | null |
#this file should contain all the sample date, location, concentration, species information for samples wanting to
#be included in the HYCOM model runs
#common locations:
#IFCB in Port Aransas, TX: -97.0318, 27.8304
#South Padre Island, TX: -97.05, 26.078
#Galveston, TX: -94.66, 29.28
#I'm having to add a fifth element to the list of sample information
#[lon, lat, depth, concentration, species]
# Map each species name to its integer index; the index appears as the fifth
# element of the per-sample records ([lon, lat, depth, concentration, species]).
species_list = dict(
    zip(
        ('Kbrevis',
         'Dinophysis',
         'Ptexanum',
         'Pminimum',
         'Asterionellopsis',
         'Thalassionema'),
        range(6),
    )
)
extra_bloom_dates = {(2009, 183, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2009, 184, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2009, 185, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 8, 5]],
# (2009, 186, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 7, 5]],
# (2009, 187, 3) : [[-97.0318, 27.8304, 3, 11, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2009, 190, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 191, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 6, 5]], #7/10/2009
# (2009, 192, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 193, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2009, 195, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2009, 196, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 26, 5]],
# (2009, 197, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 19, 5]],
# (2009, 199, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 13, 5]],
# (2009, 200, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 32, 5]],
# (2009, 201, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2009, 202, 3) : [],
# (2009, 203, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2009, 205, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 206, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2009, 207, 3) : [],
# (2009, 208, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2009, 209, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2009, 210, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2009, 211, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2009, 212, 3) : [[-97.0318, 27.8304, 3, 9, 5]],
# (2009, 213, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 9, 5]], #8/1/2009
# (2009, 214, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2009, 215, 3) : [[-97.0318, 27.8304, 3, 9, 5]],
# (2009, 216, 3) : [[-97.0318, 27.8304, 3, 8, 5]],
# (2009, 217, 3) : [[-97.0318, 27.8304, 3, 11, 5]],
# (2009, 218, 3) : [[-97.0318, 27.8304, 3, 29, 5]],
# (2009, 219, 3) : [[-97.0318, 27.8304, 3, 17, 5]],
# (2009, 220, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2009, 224, 3) : [[-97.0318, 27.8304, 3, 7, 5]],
# (2009, 225, 3) : [[-97.0318, 27.8304, 3, 7, 5]],
# (2009, 226, 3) : [[-97.0318, 27.8304, 3, 6, 5]],
# (2009, 227, 3) : [[-97.0318, 27.8304, 3, 11, 5]],
# (2009, 228, 3) : [[-97.0318, 27.8304, 3, 14, 5]],
# (2009, 229, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 12, 5]],
# (2009, 230, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 14, 5]],
# (2009, 233, 3) : [[-97.0318, 27.8304, 3, 5, 5]], #8/21/2009
# (2009, 234, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 13, 5]],
# (2009, 236, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2009, 237, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2009, 238, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 241, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2009, 242, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2009, 243, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 7, 5]],
# (2009, 244, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2009, 245, 3) : [[-97.0318, 27.8304, 3, 5, 5]], #9/2/2009
# (2009, 246, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2009, 247, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2009, 249, 3) : [[-97.0318, 27.8304, 3, 3, 4]],
# (2009, 256, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2009, 257, 3) : [[-97.0318, 27.8304, 3, 10, 5]],
# (2009, 258, 3) : [[-97.0318, 27.8304, 3, 7, 5]],
# (2009, 259, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2009, 260, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2009, 261, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2009, 262, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2009, 263, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 3, 5]], #9/20/09
# (2009, 264, 3) : [[-97.0318, 27.8304, 3, 7, 0], [-97.0318, 27.8304, 3, 7, 5]], #9/21/09
# (2009, 265, 3) : [[-97.0318, 27.8304, 3, 5, 0], [-97.0318, 27.8304, 3, 8, 5]], #TX 9/22/09 cytobot sample, #9/22/2009
# (2009, 266, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 11, 5]], #9/23/09
# (2009, 267, 3) : [[-97.0318, 27.8304, 3, 13, 5]],
# (2009, 268, 3) : [[-97.0318, 27.8304, 3, 22, 5]],
# (2009, 269, 3) : [[-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 25, 5]], #9/26/09
# (2009, 270, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2009, 271, 3) : [[-82.4688, 27.11138, 1, 2, 0], [-97.0318, 27.8304, 3, 4, 5]], #FL sampleID 99053, #9/28/2009
# (2009, 272, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2009, 273, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2009, 274, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 275, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 276, 3) : [[-82.2765, 26.5304, 1, 7, 0], [-82.5175, 26.3229, 1, 2, 0], [-82.3064, 26.3519, 1, 9, 0], [-82.3064, 26.3519, 13, 16, 0], [-82.2577, 26.2585, 1, 5, 0], [-82.2577, 26.2585, 14, 11, 0], [-97.0318, 27.8304, 3, 5, 0], [-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 6, 5]], #10/3/2009, #10/3/2009
# (2009, 277, 3) : [[-82.2577, 26.2585, 1, 3, 0], [-82.2577, 26.2585, 14, 11, 0], [-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 10, 4]], #10/4/2009, #10/4/2009
# (2009, 278, 3) : [[-97.1575, 26.0765, 1, 25, 0], [-97.1667, 26.1233, 1, 25, 0], [-97.27, 26.5622, 1, 22, 0], [-97.2514, 26.4938, 1, 25, 0], [-97.2285, 26.4226, 1, 25, 0], [-97.1506, 26.0631, 1, 10, 0], [-97.0318, 27.8304, 3, 5, 0], [-97.0318, 27.8304, 3, 10, 4]], #10/5/2009, #10/5/2009
# (2009, 279, 3) : [[-97.1567, 26.0686, 1, 293, 0], [-97.0318, 27.8304, 3, 20, 4], [-97.0318, 27.8304, 3, 3, 5]], #10/6/2009, #10/6/2009
# (2009, 280, 3) : [[-97.0318, 27.8304, 1, 3, 3]], #10/7/2009
# (2009, 281, 3) : [[-97.0318, 27.8304, 3, 15, 0], [-97.0318, 27.8304, 3, 11, 4], [-97.0318, 27.8304, 3, 3, 5]], #10/8/09
# (2009, 282, 3) : [[-82.3909, 26.2612, 1, 25, 0], [-82.3063, 26.3564, 1, 4, 0], [-82.1915, 26.2943, 1,23, 0], [-82.1915, 26.2943, 5, 15, 0], [-82.1915, 26.2943, 10, 4, 0], [-97.0318, 27.8304, 3, 35, 0], [-97.0318, 27.8304, 3, 45, 4], [-97.0318, 27.8304, 3, 3, 5]], #10/9/2009, #10/9/2009
# (2009, 283, 3) : [[-82.3666, 26.3044, 1, 46, 0], [-82.3666, 26.3044, 15, 4, 0], [-82.4513, 26.372, 1, 25, 0], [-82.4513, 26.372, 12, 11, 0], [-82.4513, 26.372, 16, 5, 0], [-82.4484, 26.4619, 1, 25, 0], [-82.4508, 26.4122, 1, 47, 0], [-82.4508, 26.4122, 5, 46, 0], [-82.4508, 26.4122, 10, 24, 0], [-82.4508, 26.4122, 13, 2, 0], [-97.0318, 27.8304, 3, 38, 0], [-97.0318, 27.8304, 3, 28, 4], [-97.0318, 27.8304, 3, 5, 5]], #10/10/2009
# (2009, 284, 3) : [[-97.0318, 27.8304, 3, 198, 0], [-97.0318, 27.8304, 3, 7, 4], [-97.0318, 27.8304, 3, 4, 5]], #10/11/09
# (2009, 285, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 5, 5]], #10/12/09
# (2009, 286, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 4, 5]], #10/13/09
# (2009, 287, 3) : [[-97.0318, 27.8304, 3, 15, 0], [-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 4, 5]], #10/14/09
# (2009, 288, 3) : [[-97.0942, 26.0144, 1, 462, 0], [-97.1317, 26.3307, 1, 241, 0], [-97.135, 26.1664, 1, 1000, 0], [-97.0318, 27.8304, 3, 68, 0], [-97.0318, 27.8304, 3, 7, 3], [-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 4, 5]], #10/15/2009, #10/15/2009
# (2009, 289, 3) : [[-97.1567, 26.0686, 1, 24, 0], [-82.2854, 26.4894, 1, 23, 0], [-82.6596, 26.6065, 1, 11, 0], [-97.0318, 27.8304, 3, 375, 0], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 5, 5]], #10/16/2009, #10/16/2009
# (2009, 292, 3) : [[-97.1575, 26.0765, 1, 1000, 0], [-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 2, 3]], #10/19/2009, #10/19/2009
# (2009, 293, 3) : [[-97.0318, 27.8304, 3, 1, 0], [-97.1575, 26.0765, 1, 960, 0], [-97.1567, 26.0686, 1, 330, 0]], #10/20/2009, #10/20/2009
# (2009, 294, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 2, 5]], #10/21/2009, #10/21/2009
# (2009, 295, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #10/22/2009, #10/22/2009
# (2009, 296, 3) : [[-82.5793, 27.3337, 1, 2, 0], [-97.1575, 26.0765, 1, 25, 0], [-97.0318, 27.8304, 3, 13, 0]], #10/23/2009, #10/23/2009
# (2009, 297, 3) : [[-82.3311, 26.3656, 1, 25, 0], [-82.3311, 26.3656, 16, 17, 0], [-97.0318, 27.8304, 3, 4, 2], [-97.0318, 27.8304, 3, 3, 4]], #10/24/2009, #10/24/2009
# (2009, 298, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 3, 4]], #10/25/2009
# (2009, 299, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #10/26/2009
# (2009, 300, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 8, 5]], #10/27/2009, #10/27/2009
# (2009, 301, 3) : [[-97.1778, 26.2079, 1, 1000, 0], [-97.1719, 26.1652, 1, 312, 0], [-97.0318, 27.8304, 3, 6, 3], [-82.1938, 26.5263, 1, 25, 0], [-82.0801, 26.4230, 1, 24, 0], [-82.0154, 26.4515, 1, 2, 0], [-81.96, 26.4266, 3, 5, 0], [-82.0, 26.3833, 1, 13, 0], [-82.0433, 26.3416, 1, 25, 0], [-82.1333, 26.2583, 1, 25, 0], [-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 6, 5]], #10/28/2009, #10/28/2009
# (2009, 302, 3) : [[-97.0318, 27.8304, 3, 81, 0], [-97.1567, 26.0686, 1, 122, 0], [-97.2058, 26.0788, 1, 365, 0], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 3, 5]], #10/29/2009, #10/29/2009
# (2009, 303, 3) : [[-82.042, 26.4349, 1, 25, 0], [-82.01588, 26.4508, 1, 25, 0], [-82.42, 26.71, 1, 25, 0], [-82.42, 26.71, 2, 19, 0], [-82.221, 26.6092, 1, 25, 0], [-82.1090, 26.4769, 1, 9, 0], [-82.1765, 26.4926, 1, 25, 0], [-82.2005, 26.5535, 1, 17, 0], [-82.0357, 26.453812, 1, 25, 0], [-97.2058, 26.0788, 1, 19, 0], [-97.1566, 26.0686, 1, 34, 0]], #10/30/2009, #10/30/2009
# (2009, 304, 3) : [[-82.01303, 26.4758, 1, 25, 0], [-82.0149, 26.4831, 1, 25, 0], [-82.0136, 26.4532, 1, 10, 0], [-82.0121, 26.4487, 1, 15, 0], [-82.0803, 26.3963, 1, 25, 0], [-82.0803, 26.3963, 1, 25, 0], [-82.22855, 26.303933, 13, 25, 0], [-82.2285, 26.3039, 1, 25, 0], [-81.94741, 26.34415, 7, 25, 0], [-81.94741, 26.34415, 1, 25, 0]], #10/31/2009, #10/31/2009
# (2009, 305, 3) : [[-82.01588, 26.4508, 1, 25, 0], [-82.4412, 26.4708, 1, 25, 0], [-82.4411, 26.4666, 1, 25, 0], [-82.4407, 26.4427, 1, 25, 0], [-82.4404, 26.4265, 1, 25, 0], [-82.4405, 26.427, 1, 25, 0], [-82.4403, 26.4171, 1, 25, 0], [-82.4404, 26.4215, 1, 25, 0], [-82.4403, 26.417, 1, 25, 0], [-82.4402, 26.4143, 1, 25, 0], [-82.4402, 26.4096, 1, 25, 0], [-82.44, 26.4024, 1, 25, 0], [-82.4399, 26.3923, 1, 25, 0], [-82.4399, 26.3939, 1, 25, 0], [-82.4401, 26.4051, 1, 25, 0], [-82.4403, 26.4154, 1, 25, 0], [-82.4404, 26.4258, 1, 25, 0], [-82.4406, 26.4381, 1, 25, 0], [-82.4408, 26.4448, 1, 25, 0], [-97.1566, 26.0686, 1, 454, 0]], #11/1/2009, #11/1/2009
# (2009, 306, 3) : [[-82.251, 26.6092, 1, 5, 0], [-82.2505, 26.5535, 1, 5, 0], [-82.22654, 26.49263, 1, 13, 0], [-82.15909, 26.47695, 1, 5, 0], [-82.1111, 26.7614, 1, 5, 0], [-97.1516, 26.0133, 1, 240, 0], [-97.0318, 27.8304, 3, 5, 0]], #11/2/2009, #11/2/2009
# (2009, 307, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 1, 4, 3]], #11/3/2009
# (2009, 308, 3) : [[-97.0318, 27.8304, 1, 3, 3]], #11/4/2009
# (2009, 309, 3) : [[-97.0318, 27.8304, 3, 3, 4]], #11/5/2009
# (2009, 310, 3) : [[-97.0318, 27.8304, 3, 4, 4]], #11/6/2009
# (2009, 311, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.2058, 26.0788, 1, 26, 0], [-97.1575, 26.0755, 1, 16, 0]], #11/7/2009, #11/7/2009
# (2009, 312, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-82.0357, 26.4538, 1, 25, 0]], #11/8/2009, #11/8/2009
# (2009, 313, 3) : [[-97.0318, 27.8304, 3, 5, 0], [-82.0357, 26.4538, 1, 25, 0], [-81.7275, 25.9733, 1, 9, 0], [-97.1719, 26.1652, 1, 87, 0], [-97.1758, 26.1936, 1, 13, 0],[-97.2058, 26.0788, 1, 14, 0],[-97.1575, 26.0755, 1, 33, 0]], #11/9/2009, #11/9/2009
# (2009, 314, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 4, 4]], #11/10/2009, #11/10/2009
# (2009, 317, 3) : [[-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 6, 4]], #11/13/2009
# (2009, 318, 3) : [[-97.0318, 27.8304, 3, 4, 3]], #11/14/2009
# (2009, 319, 3) : [[-97.0318, 27.8304, 3, 3, 3]], #11/15/2009
# (2009, 320, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.2058, 26.0788, 1, 48, 0], [-97.1575, 26.0755, 1, 1000, 0], [-81.7280, 25.9116, 1, 3, 0], [-97.0318, 27.8304, 3, 2, 4]], #11/16/2009
# (2009, 321, 3) : [], #11/17/2009
# (2009, 322, 3) : [[-82.6029, 27.1787, 1, 12, 0], [-97.0318, 27.8304, 3, 5, 0], [-97.2058, 26.0788, 1, 17, 0], [-97.1719, 26.1652, 1, 1000, 0], [-97.1758, 26.1936, 1, 764, 0], [-97.1575, 26.0755, 1, 372, 0], [-97.0318, 27.8304, 3, 2, 4]], #11/18/2009, #11/18/2009
# (2009, 325, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 3, 3]], #11/21/2009
# (2009, 326, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 3, 3]], #11/22/2009
# (2009, 327, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 4, 5]], #11/23/2009
# (2009, 328, 3) : [[-97.0318, 27.8304, 3, 3, 3]], #11/24/2009
# (2009, 329, 3) : [[-97.2058, 26.0788, 1, 55, 0], [-97.1719, 26.1652, 1, 816, 0], [-97.1758, 26.1936, 1, 87, 0], [-97.1575, 26.0755, 1, 1000, 0]], #11/25/2009
# (2009, 331, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 2, 3], [-97.0318, 27.8304, 3, 12, 4], [-97.0318, 27.8304, 3, 4, 5]], #11/27/2009
# (2009, 333, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.2058, 26.0788, 1, 181, 0], [-97.1575, 26.0788, 1, 51, 0]], #11/29/2009, #11/29/2009
# (2009, 334, 3) : [[-97.0318, 27.8304, 3, 3, 0]],
# (2009, 335, 3) : [[-97.2058, 26.0788, 1, 346, 0], [-97.0318, 27.8304, 3, 2, 0]], #12/1/2009
# (2009, 336, 3) : [[-97.0318, 27.8304, 3, 4, 0], [-97.0318, 27.8304, 3, 2, 3], [-97.0318, 27.8304, 3, 3, 5]],
# (2009, 337, 3) : [[-97.0318, 27.8304, 3, 4, 0], [-97.2058, 26.0788, 1, 220, 0], [-97.0318, 27.8304, 3, 3, 4]], #12/3/2009, #12/3/2009
# (2009, 338, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 2, 5]], #12/4/2009
# (2009, 339, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #12/5/2009
# (2009, 342, 3) : [[-97.0318, 27.8304, 3, 4, 3]], #12/8/2009
# (2009, 343, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 3, 3]], #12/9/2009
# (2009, 344, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 10, 3], [-97.0318, 27.8304, 3, 3, 4]], #12/10/2009
# (2009, 345, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 9, 3], [-97.0318, 27.8304, 3, 3, 4]], #12/11/2009
# (2009, 346, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 11, 4], [-97.0318, 27.8304, 3, 8, 5]], #12/12/2009
# (2009, 347, 3) : [[-97.0318, 27.8304, 3, 10, 0], [-97.0318, 27.8304, 3, 8, 4]], #12/13/2009
# (2009, 348, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-82.4531, 27.0103, 1, 5, 0], [-82.498, 27.0663, 1, 5, 0], [-82.5009, 27.0735, 1, 25, 0], [-82.5029, 27.0783, 1, 25, 0], [-82.5109, 27.1003, 1, 25, 0], [-82.5188, 27.1138, 1, 17, 0], [-82.5212, 27.1233, 1, 25, 0], [-82.6031, 27.2652, 1, 25, 0], [-82.6124, 27.3268, 1, 5, 0], [-82.682, 27.3732, 1, 5, 0], [-97.0318, 27.8304, 3, 5, 0], [-97.0318, 27.8304, 3, 5, 4]], #12/14/2009, #12/14/2009
# (2009, 349, 3) : [[-82.5798, 27.3355, 1, 2, 0], [-82.5793, 27.3337, 1, 3, 0], [-82.5780, 27.3384, 1, 8, 0], [-82.6218, 27.3003, 1, 25, 0], [-82.6218, 27.3003, 8, 14, 0], [-82.5059, 27.0593, 1, 25, 0], [-82.5059, 27.0593, 10, 5, 0], [-82.5643, 27.1792, 1, 25, 0], [-82.5643, 27.1792, 8, 6, 0]], #12/15/2009, #12/15/2009
# (2009, 350, 3) : [[-97.0318, 27.8304, 3, 6, 0]], #12/16/2009
# (2009, 351, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 4, 4]], #12/17/2009
# (2009, 352, 3) : [[-97.0318, 27.8304, 3, 13, 4]], #12/18/2009
# (2009, 353, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 10, 4]], #12/19/2009
# (2009, 354, 3) : [[-97.0318, 27.8304, 3, 4, 0], [-97.0318, 27.8304, 3, 7, 4]], #12/20/2009
# (2009, 355, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 4, 3], [-82.3857, 26.9632, 1, 14, 0], [-82.4131, 27.0103, 1, 8, 0], [-82.4436, 27.0570, 1, 22, 0], [-82.4480, 27.0663, 1, 25, 0], [-82.4509, 27.0735, 1, 25, 0], [-82.4529, 27.0783, 1, 25, 0], [-82.4688, 27.1138, 1, 25, 0], [-82.5171, 27.2184, 1, 4, 0], [-82.5624, 27.3268, 1, 7, 0], [-97.0318, 27.8304, 3, 8, 4]], #12/21/2009, #12/21/2009
# (2009, 356, 3) : [[-97.0318, 27.8304, 3, 7, 0], [-97.0318, 27.8304, 3, 15, 3], [-82.5779, 27.3316, 1, 2, 0], [-82.3618, 26.9246, 1, 9, 0], [-82.2745, 26.8038, 1, 5, 0], [-81.4735, 25.6161, 1, 25, 0], [-97.0318, 27.8304, 3, 12, 4]], #12/22/2009, #12/22/2009
# (2009, 357, 3) : [[-97.0318, 27.8304, 3, 5, 0], [-97.0318, 27.8304, 3, 8, 3], [-97.0318, 27.8304, 3, 10, 4]], #12/23/2009
# (2009, 358, 3) : [[-97.0318, 27.8304, 3, 6, 3], [-82.2785, 26.8136, 1, 3, 0], [-82.2583, 26.7187, 1, 25, 0], [-82.2333, 26.7405, 1, 25, 0], [-82.2452, 26.7872, 1, 7, 0], [-82.2540, 26.8167, 1, 4, 0], [-97.0318, 27.8304, 3, 7, 4]], #12/24/2009, #12/24/2009
# (2009, 359, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 8, 3], [-97.0318, 27.8304, 3, 10, 4]], #12/25/2009
# (2009, 360, 3) : [[-97.0318, 27.8304, 3, 6, 0], [-97.0318, 27.8304, 3, 7, 2], [-97.0318, 27.8304, 3, 9, 3], [-97.0318, 27.8304, 3, 9, 4]], #12/26/2009
# (2009, 361, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 6, 3], [-97.0318, 27.8304, 3, 6, 4]], #12/27/2009
# (2009, 362, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 7, 3], [-97.0318, 27.8304, 3, 7, 4]], #12/28/2009
# (2009, 363, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 3, 4]], #12/29/2009
# (2009, 364, 3) : [[-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 5, 4]], #12/30/2009
# (2009, 365, 3) : [[-97.0318, 27.8304, 3, 4, 0], [-97.0318, 27.8304, 3, 5, 3], [-97.0318, 27.8304, 3, 3, 4]], #12/31/2009
#
(2010, 1, 3) : [[-97.0318, 27.8304, 3, 5, 0], [-97.0318, 27.8304, 3, 7, 3], [-97.0318, 27.8304, 3, 3, 4]], #1/1/2010
(2010, 2, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 3, 3]],
(2010, 3, 3) : [[-97.0318, 27.8304, 3, 3, 3]],
(2010, 4, 3) : [[-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 3, 4]],
(2010, 5, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 3, 4]], #1/5/2010
(2010, 6, 3) : [[-97.0318, 27.8304, 3, 8, 0], [-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 24, 3], [-97.0318, 27.8304, 3, 5, 4]], #1/6/2010
(2010, 7, 3) : [[-97.0318, 27.8304, 3, 4, 0], [-97.0318, 27.8304, 3, 3, 3]],
(2010, 8, 3) : [[-97.0318, 27.8304, 3, 21, 3], [-97.0318, 27.8304, 3, 35, 4], [-97.0318, 27.8304, 3, 9, 5]],
(2010, 9, 3) : [],
(2010, 10, 3) : [],
(2010, 11, 3) : [],
(2010, 12, 3) : [[-97.0318, 27.8304, 3, 3, 0], [-97.0318, 27.8304, 3, 4, 2], [-97.0318, 27.8304, 3, 5, 3], [-97.0318, 27.8304, 3, 10, 4]],
(2010, 13, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 6, 3], [-97.0318, 27.8304, 3, 9, 4]],
(2010, 14, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 6, 3], [-97.0318, 27.8304, 3, 13, 4]],
(2010, 15, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 7, 3], [-97.0318, 27.8304, 3, 10, 4]],
(2010, 16, 3) : [[-97.0318, 27.8304, 3, 10, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 37, 4], [-97.0318, 27.8304, 3, 3, 5]],
(2010, 17, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 13, 4]],
(2010, 18, 3) : [[-97.0318, 27.8304, 3, 3, 3]],
(2010, 19, 3) : [[-97.0318, 27.8304, 3, 11, 4]],
(2010, 20, 3) : [[-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 11, 4]],
(2010, 21, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 9, 4]], #1/21/2010
(2010, 22, 2) : [[-97.0318, 27.8304, 3, 4, 2]],
(2010, 23, 3) : [[-97.0318, 27.8304, 3, 7, 2], [-97.0318, 27.8304, 3, 6, 3], [-97.0318, 27.8304, 3, 12, 4]],
(2010, 24, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 5, 3], [-97.0318, 27.8304, 3, 6, 4]],
(2010, 25, 3) : [[-97.0318, 27.8304, 3, 4, 0], [-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 3, 4]],
(2010, 26, 3) : [[-97.0318, 27.8304, 3, 2, 0], [-97.0318, 27.8304, 3, 4, 2], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 4, 4]], #1/26/2010
(2010, 27, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 3, 4]],
(2010, 28, 3) : [[-97.0318, 27.8304, 3, 7, 2], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 3, 4]], #1/28/2010
(2010, 29, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 6, 3], [-97.0318, 27.8304, 3, 2, 4]],
(2010, 30, 3) : [[-97.0318, 27.8304, 3, 13, 2], [-97.0318, 27.8304, 3, 12, 3], [-97.0318, 27.8304, 3, 8, 4]],
(2010, 31, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 6, 3], [-97.0318, 27.8304, 3, 3, 4]], #1/31/2010
(2010, 32, 3) : [[-97.0318, 27.8304, 3, 4, 4]], #2/1/2010
(2010, 33, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 4, 3]], #2/2/2010
(2010, 34, 3) : [[-97.0318, 27.8304, 3, 4, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 4, 4]],
(2010, 35, 3) : [[-97.0318, 27.8304, 3, 8, 2], [-97.0318, 27.8304, 3, 5, 3], [-97.0318, 27.8304, 3, 13, 4]], #2/4/2010
(2010, 36, 3) : [[-97.0318, 27.8304, 3, 6, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 32, 4]],
(2010, 37, 3) : [[-97.0318, 27.8304, 3, 26, 3], [-97.0318, 27.8304, 3, 2, 4]], #2/6/2010
(2010, 38, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 95, 3], [-97.0318, 27.8304, 3, 8, 4]],
(2010, 39, 3) : [[-97.0318, 27.8304, 3, 16, 2], [-97.0318, 27.8304, 3, 23, 3], [-97.0318, 27.8304, 3, 3, 4]], #2/8/2010
(2010, 40, 3) : [[-97.0318, 27.8304, 3, 32, 2], [-97.0318, 27.8304, 3, 13, 3], [-97.0318, 27.8304, 3, 3, 4]],
(2010, 41, 3) : [[-97.0318, 27.8304, 3, 7, 2], [-97.0318, 27.8304, 3, 13, 3]], #2/10/2010
(2010, 43, 3) : [[-97.0318, 27.8304, 3, 4, 2], [-97.0318, 27.8304, 3, 115, 3]], #2/12/2010
(2010, 46, 3) : [[-97.0318, 27.8304, 3, 6, 2]], #2/15/2010
(2010, 47, 3) : [[-97.0318, 27.8304, 3, 8, 2], [-97.0318, 27.8304, 3, 415, 3], [-97.0318, 27.8304, 3, 2, 4]], #2/16/2010
(2010, 48, 3) : [[-97.0318, 27.8304, 3, 11, 2], [-97.0318, 27.8304, 3, 60, 3], [-97.0318, 27.8304, 3, 4, 4]],
(2010, 49, 3) : [[-97.0318, 27.8304, 3, 20, 2], [-97.0318, 27.8304, 3, 160, 3], [-97.0318, 27.8304, 3, 2, 4]], #2/18/2010
(2010, 50, 3) : [[-97.0318, 27.8304, 3, 38, 2], [-97.0318, 27.8304, 3, 138, 3]],
(2010, 51, 3) : [[-97.0318, 27.8304, 3, 58, 2], [-97.0318, 27.8304, 3, 55, 3]], #2/20/2010
(2010, 52, 3) : [[-97.0318, 27.8304, 3, 11, 2], [-97.0318, 27.8304, 3, 43, 3], [-97.0318, 27.8304, 3, 2, 4]],
(2010, 53, 3) : [[-97.0318, 27.8304, 3, 20, 2], [-97.0318, 27.8304, 3, 44, 3]], #2/22/2010
(2010, 54, 3) : [[-97.0318, 27.8304, 3, 18, 2], [-97.0318, 27.8304, 3, 22, 3]],
(2010, 55, 3) : [[-97.0318, 27.8304, 3, 21, 2], [-97.0318, 27.8304, 3, 98, 3]], #2/24/2010
(2010, 56, 3) : [[-97.0318, 27.8304, 3, 59, 2], [-97.0318, 27.8304, 3, 64, 3]],
(2010, 57, 3) : [[-97.0318, 27.8304, 3, 36, 2], [-97.0318, 27.8304, 3, 11, 3]], #2/26/2010
(2010, 58, 3) : [[-97.0318, 27.8304, 3, 51, 2], [-97.0318, 27.8304, 3, 59, 3]],
(2010, 59, 3) : [[-97.0318, 27.8304, 3, 97, 2], [-97.0318, 27.8304, 3, 46, 3]], #2/28/2010
(2010, 60, 3) : [[-97.0318, 27.8304, 3, 34, 2], [-97.0318, 27.8304, 3, 46, 3]],
(2010, 61, 3) : [[-97.0318, 27.8304, 3, 46, 2], [-97.0318, 27.8304, 3, 64, 3]], #3/2/2010
(2010, 62, 3) : [[-97.0318, 27.8304, 3, 19, 2], [-97.0318, 27.8304, 3, 72, 3], [-97.0318, 27.8304, 3, 4, 4]], #3/3/2010
(2010, 63, 3) : [[-97.0318, 27.8304, 3, 55, 2], [-97.0318, 27.8304, 3, 72, 3]], #3/4/2010
(2010, 64, 3) : [[-97.0318, 27.8304, 3, 68, 2], [-97.0318, 27.8304, 3, 51, 3]],
(2010, 65, 3) : [[-97.0318, 27.8304, 3, 169, 2], [-97.0318, 27.8304, 3, 26, 3]], #3/6/2010
(2010, 66, 3) : [[-97.0318, 27.8304, 3, 145, 2], [-97.0318, 27.8304, 3, 90, 3], [-97.0318, 27.8304, 3, 1, 1]],
(2010, 67, 3) : [[-97.0318, 27.8304, 3, 28, 2]], #3/8/2010
(2010, 68, 3) : [[-97.0318, 27.8304, 3, 55, 2], [-97.0318, 27.8304, 3, 6, 3]],
(2010, 69, 3) : [[-97.0318, 27.8304, 3, 49, 2], [-97.0318, 27.8304, 3, 5, 3]], #3/10/2010
(2010, 70, 3) : [[-97.0318, 27.8304, 3, 68, 2]],
(2010, 71, 3) : [[-97.0318, 27.8304, 3, 205, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 1, 1]], #3/12/2010
(2010, 72, 3) : [[-97.0318, 27.8304, 3, 109, 2], [-97.0318, 27.8304, 3, 6, 3]],
(2010, 73, 3) : [[-97.0318, 27.8304, 3, 478, 2], [-97.0318, 27.8304, 3, 7, 3], [-97.0318, 27.8304, 3, 2, 1]], #3/14/2010
(2010, 74, 3) : [[-97.0318, 27.8304, 3, 754, 2], [-97.0318, 27.8304, 3, 3, 1]],
(2010, 75, 3) : [[-97.0318, 27.8304, 3, 165, 2], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 1, 1]], #3/16/2010
(2010, 76, 3) : [[-97.0318, 27.8304, 3, 65, 2], [-97.0318, 27.8304, 3, 5, 3], [-97.0318, 27.8304, 3, 1, 1]],
(2010, 77, 3) : [[-97.0318, 27.8304, 3, 49, 2], [-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 1, 1]], #3/18/2010
(2010, 78, 3) : [[-97.0318, 27.8304, 3, 586, 2], [-97.0318, 27.8304, 3, 12, 1]],
(2010, 79, 3) : [[-97.0318, 27.8304, 3, 204, 2], [-97.0318, 27.8304, 3, 5, 1]], #3/20/2010
(2010, 80, 3) : [[-97.0318, 27.8304, 3, 122, 2], [-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 3, 1]], #3/21/2010
(2010, 81, 3) : [[-97.0318, 27.8304, 3, 297, 2], [-97.0318, 27.8304, 3, 12, 1]], #3/22/2010
(2010, 82, 3) : [[-97.0318, 27.8304, 3, 95, 2], [-97.0318, 27.8304, 3, 8, 1]],
(2010, 83, 3) : [[-97.0318, 27.8304, 3, 32, 2]], #3/24/2010
(2010, 84, 3) : [[-97.0318, 27.8304, 3, 15, 2]],
(2010, 85, 3) : [[-97.0318, 27.8304, 3, 35, 2]], #3/26/2010
(2010, 86, 3) : [[-97.0318, 27.8304, 3, 20, 2]],
(2010, 87, 3) : [[-97.0318, 27.8304, 3, 18, 2]], #3/28/2010
(2010, 88, 3) : [[-97.0318, 27.8304, 3, 18, 2]],
(2010, 89, 3) : [[-97.0318, 27.8304, 3, 10, 2]], #3/30/2010
(2010, 90, 3) : [[-97.0318, 27.8304, 3, 8, 2], [-97.0318, 27.8304, 3, 2, 3], [-97.0318, 27.8304, 3, 2, 4]], #3/31/2010
(2010, 91, 3) : [[-97.0318, 27.8304, 3, 10, 2], [-97.0318, 27.8304, 3, 2, 3], [-97.0318, 27.8304, 3, 5, 4]], #4/1/2010
(2010, 92, 3) : [[-97.0318, 27.8304, 3, 9, 2], [-97.0318, 27.8304, 3, 2, 3], [-97.0318, 27.8304, 3, 5, 4]],
(2010, 93, 3) : [[-97.0318, 27.8304, 3, 18, 2], [-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 5]], #4/3/2010
(2010, 94, 3) : [[-97.0318, 27.8304, 3, 23, 2], [-97.0318, 27.8304, 3, 2, 4]],
(2010, 95, 3) : [[-97.0318, 27.8304, 3, 10, 2], [-97.0318, 27.8304, 3, 3, 4]], #4/5/2010
(2010, 96, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 2, 4]],
(2010, 97, 3) : [[-97.0318, 27.8304, 3, 6, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 5]], #4/7/2010
(2010, 98, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 4, 4]], #4/8/2010
(2010, 99, 3) : [[-97.0318, 27.8304, 3, 10, 2], [-97.0318, 27.8304, 3, 9, 4]], #4/9/2010
(2010, 100, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 12, 4]], #4/10/2010
(2010, 101, 3) : [[-97.0318, 27.8304, 3, 3, 2], [-97.0318, 27.8304, 3, 3, 3], [-97.0318, 27.8304, 3, 16, 4]],
(2010, 102, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 7, 3], [-97.0318, 27.8304, 3, 12, 4]], #4/12/2010
(2010, 103, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 10, 4]],
(2010, 104, 3) : [[-97.0318, 27.8304, 3, 4, 3], [-97.0318, 27.8304, 3, 3, 4]], #4/14/2010
(2010, 105, 3) : [[-97.0318, 27.8304, 3, 3, 3]], #4/15/2010
(2010, 106, 3) : [[-97.0318, 27.8304, 3, 6, 2], [-97.0318, 27.8304, 3, 5, 1]], #4/16/2010
(2010, 107, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 6, 1]], #4/17/2010
(2010, 108, 3) : [[-97.0318, 27.8304, 3, 2, 2], [-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 2, 5], [-97.0318, 27.8304, 3, 10, 1]], #4/18/2010
(2010, 109, 3) : [[-97.0318, 27.8304, 3, 5, 2], [-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 7, 1]], #4/19/2010
(2010, 110, 3) : [[-97.0318, 27.8304, 3, 7, 1]], #4/20/2010
# (2010, 111, 3) : [[-97.0318, 27.8304, 3, 3, 1]], #4/21/2010
# (2010, 112, 3) : [[-97.0318, 27.8304, 3, 4, 1]], #4/22/2010
# (2010, 113, 3) : [[-97.0318, 27.8304, 3, 11, 1]], #4/23/2010
# (2010, 114, 3) : [[-97.0318, 27.8304, 3, 3, 1]], #4/24/2010
# (2010, 115, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #4/25/2010
# (2010, 116, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 3, 5], [-97.0318, 27.8304, 3, 2, 1]], #4/26/2010
# (2010, 117, 3) : [[-97.0318, 27.8304, 3, 12, 4], [-97.0318, 27.8304, 3, 4, 5], [-97.0318, 27.8304, 3, 1, 1]],
# (2010, 118, 3) : [[-97.0318, 27.8304, 3, 18, 4], [-97.0318, 27.8304, 3, 6, 5], [-97.0318, 27.8304, 3, 1, 1]], #4/28/2010
# (2010, 119, 3) : [[-97.0318, 27.8304, 3, 17, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 120, 3) : [[-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 132, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 133, 3) : [[-97.0318, 27.8304, 3, 51, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 134, 3) : [[-97.0318, 27.8304, 3, 48, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 135, 3) : [[-97.0318, 27.8304, 3, 57, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 136, 3) : [[-97.0318, 27.8304, 3, 30, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 137, 3) : [[-97.0318, 27.8304, 3, 24, 4]],
# (2010, 138, 3) : [[-97.0318, 27.8304, 3, 11, 4]],
# (2010, 139, 3) : [[-97.0318, 27.8304, 3, 11, 4]],
# (2010, 140, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
# (2010, 141, 3) : [[-97.0318, 27.8304, 3, 14, 4]],
# (2010, 142, 3) : [[-97.0318, 27.8304, 3, 26, 4], [-97.0318, 27.8304, 3, 2, 5]],
# (2010, 143, 3) : [[-97.0318, 27.8304, 3, 14, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 144, 3) : [[-97.0318, 27.8304, 3, 16, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2010, 145, 3) : [[-97.0318, 27.8304, 3, 12, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 147, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2010, 152, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 153, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 154, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 155, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2010, 156, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2010, 157, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 7, 5]],
# (2010, 158, 3) : [[-97.0318, 27.8304, 3, 9, 4], [-97.0318, 27.8304, 3, 10, 5]],
# (2010, 159, 3) : [[-97.0318, 27.8304, 3, 7, 4], [-97.0318, 27.8304, 3, 16, 5]],
# (2010, 160, 3) : [[-97.0318, 27.8304, 3, 13, 4], [-97.0318, 27.8304, 3, 41, 5]],
# (2010, 161, 3) : [[-97.0318, 27.8304, 3, 17, 4], [-97.0318, 27.8304, 3, 26, 5]],
# (2010, 162, 3) : [[-97.0318, 27.8304, 3, 18, 4], [-97.0318, 27.8304, 3, 24, 5]],
# (2010, 163, 3) : [[-97.0318, 27.8304, 3, 18, 4], [-97.0318, 27.8304, 3, 33, 5]],
# (2010, 164, 3) : [[-97.0318, 27.8304, 3, 14, 4], [-97.0318, 27.8304, 3, 23, 5]],
# (2010, 165, 3) : [[-97.0318, 27.8304, 3, 19, 4], [-97.0318, 27.8304, 3, 25, 5]],
# (2010, 166, 3) : [[-97.0318, 27.8304, 3, 26, 4], [-97.0318, 27.8304, 3, 21, 5]],
# (2010, 167, 3) : [[-97.0318, 27.8304, 3, 10, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 168, 3) : [[-97.0318, 27.8304, 3, 11, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 169, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 170, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 171, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 172, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2010, 173, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 174, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2010, 175, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 5, 5]],
# (2010, 176, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 177, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 178, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 179, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 180, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 182, 3) : [[-97.0318, 27.8304, 3, 33, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 183, 3) : [[-97.0318, 27.8304, 3, 16, 4], [-97.0318, 27.8304, 3, 48, 5]],
# (2010, 184, 3) : [[-97.0318, 27.8304, 3, 9, 4], [-97.0318, 27.8304, 3, 35, 5]],
# (2010, 185, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 103, 5]],
# (2010, 186, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 83, 5]],
# (2010, 187, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2010, 188, 3) : [[-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 29, 5]],
# (2010, 189, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 15, 5]],
# (2010, 190, 3) : [[-97.0318, 27.8304, 3, 7, 4], [-97.0318, 27.8304, 3, 44, 5]], #7/9/2010
# (2010, 191, 3) : [[-97.0318, 27.8304, 3, 9, 4], [-97.0318, 27.8304, 3, 36, 5]],
# (2010, 192, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 21, 5]],
# (2010, 193, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 24, 5]],
# (2010, 194, 3) : [[-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 20, 5]],
# (2010, 195, 3) : [[-97.0318, 27.8304, 3, 31, 4], [-97.0318, 27.8304, 3, 14, 5]],
# (2010, 196, 3) : [[-97.0318, 27.8304, 3, 16, 4], [-97.0318, 27.8304, 3, 10, 5]],
# (2010, 197, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 9, 5]],
# (2010, 198, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 14, 5]],
# (2010, 199, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 200, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 201, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 202, 3) : [],
# (2010, 203, 3) : [],
# (2010, 204, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2010, 205, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 7, 5]],
# (2010, 206, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 207, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 208, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 4, 5]],
# (2010, 209, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2010, 210, 3) : [[-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 211, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 15, 5]],
# (2010, 212, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 12, 5]],
# (2010, 213, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 16, 5]], #8/1/2010
# (2010, 214, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 215, 3) : [[-97.0318, 27.8304, 3, 7, 4], [-97.0318, 27.8304, 3, 23, 5]],
# (2010, 216, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 217, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 6, 5]],
# (2010, 218, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2010, 219, 3) : [[-97.0318, 27.8304, 3, 7, 5]],
# (2010, 220, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 12, 5]], #8/8/2010
# (2010, 221, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 17, 5]],
# (2010, 222, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 11, 5]],
# (2010, 223, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 16, 5]],
# (2010, 224, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 225, 3) : [],
# (2010, 226, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2010, 227, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 15, 5]],
# (2010, 228, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 10, 5]],
# (2010, 229, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 16, 5]],
# (2010, 230, 3) : [[-97.0318, 27.8304, 3, 10, 5]],
# (2010, 231, 3) : [[-97.0318, 27.8304, 3, 8, 5]],
# (2010, 232, 3) : [[-97.0318, 27.8304, 3, 10, 5]],
# (2010, 233, 3) : [],
# (2010, 234, 3) : [],
# (2010, 235, 3) : [[-97.0318, 27.8304, 3, 15, 5]],
# (2010, 236, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 15, 5]], #8/24/2010
# (2010, 237, 3) : [[-97.0318, 27.8304, 3, 10, 5]],
# (2010, 242, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 243, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 245, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 248, 3) : [[-97.0318, 27.8304, 3, 3, 4]],
# (2010, 249, 3) : [[-97.0318, 27.8304, 3, 6, 5]],
# (2010, 250, 3) : [[-97.0318, 27.8304, 3, 14, 5]],
# (2010, 251, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 252, 3) : [[-97.0318, 27.8304, 3, 3, 4]],
# (2010, 253, 3) : [[-97.0318, 27.8304, 3, 8, 4]],
# (2010, 254, 3) : [[-97.0318, 27.8304, 3, 12, 4]],
# (2010, 256, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 261, 3) : [[-97.0318, 27.8304, 3, 4, 5]],
# (2010, 262, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 263, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2010, 266, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 267, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2010, 285, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 286, 3) : [[-97.0318, 27.8304, 3, 5, 5]],
# (2010, 296, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2010, 299, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
#
# (2010, 308, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 4, 5]], #11/4/2010
# (2010, 335, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2010, 339, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
# (2010, 340, 3) : [[-97.0318, 27.8304, 3, 3, 4]], #12/6/2010
# (2010, 346, 3) : [[-97.0318, 27.8304, 3, 4, 4]],
# (2010, 347, 3) : [[-97.0318, 27.8304, 3, 5, 4]],
# (2010, 350, 3) : [[-97.0318, 27.8304, 3, 9, 4]],
# (2010, 351, 3) : [[-97.0318, 27.8304, 3, 8, 4]],
# (2010, 352, 3) : [[-97.0318, 27.8304, 3, 2, 4]], #12/18/2010
(2011, 14, 3) : [[-97.0318, 27.8304, 3, 3, 4]],
(2011, 15, 3) : [[-97.0318, 27.8304, 3, 4, 4]],
(2011, 16, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
(2011, 17, 3) : [[-97.0318, 27.8304, 3, 4, 4]],
(2011, 19, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
(2011, 20, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
(2011, 21, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 2, 5]],
(2011, 23, 3) : [[-97.0318, 27.8304, 3, 4, 4]],
(2011, 25, 3) : [[-97.0318, 27.8304, 3, 3, 4]], #1/25/2011
(2011, 26, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
(2011, 57, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #2/26/2011
(2011, 58, 3) : [[-97.0318, 27.8304, 3, 1, 1]],
(2011, 59, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 1, 1]], #2/28/2011
(2011, 60, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #3/1/2011
(2011, 61, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #3/2/2011
(2011, 62, 3) : [[-97.0318, 27.8304, 3, 4, 1]], #3/3/2011
(2011, 63, 3) : [[-97.0318, 27.8304, 3, 7, 1]], #3/4/2011
(2011, 64, 3) : [[-97.0318, 27.8304, 3, 8, 1]], #3/5/2011
(2011, 65, 3) : [[-97.0318, 27.8304, 3, 8, 4], [-97.0318, 27.8304, 3, 3, 5], [-97.0318, 27.8304, 3, 2, 1]], #3/6/2011
(2011, 66, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 3, 1]], #3/7/2011
(2011, 67, 3) : [[-97.0318, 27.8304, 3, 5, 1]], #3/8/2011
(2011, 68, 3) : [[-97.0318, 27.8304, 3, 6, 1]], #3/9/2011
(2011, 69, 3) : [[-97.0318, 27.8304, 3, 3, 1]], #3/10/2011
(2011, 70, 3) : [[-97.0318, 27.8304, 3, 4, 1]], #3/11/2011
(2011, 71, 3) : [[-97.0318, 27.8304, 3, 6, 1]], #3/12/2011
(2011, 72, 3) : [[-97.0318, 27.8304, 3, 5, 1]], #3/13/2011
(2011, 73, 3) : [[-97.0318, 27.8304, 3, 5, 1]], #3/14/2011
(2011, 74, 3) : [[-97.0318, 27.8304, 3, 5, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/15/2011
(2011, 75, 3) : [[-97.0318, 27.8304, 3, 8, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/16/2011
(2011, 76, 3) : [[-97.0318, 27.8304, 3, 11, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/17/2011
(2011, 77, 3) : [[-97.0318, 27.8304, 3, 7, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/18/2011
(2011, 78, 3) : [[-97.0318, 27.8304, 3, 6, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/19/2011
(2011, 79, 3) : [[-97.0318, 27.8304, 3, 6, 4], [-97.0318, 27.8304, 3, 4, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/20/2011
(2011, 80, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/21/2011
(2011, 81, 3) : [[-97.0318, 27.8304, 3, 20, 4], [-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/22/2011
(2011, 82, 3) : [[-97.0318, 27.8304, 3, 7, 4], [-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/23/2011
(2011, 83, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 2, 2]], #3/24/2011
(2011, 84, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 2, 2]], #3/25/2011
(2011, 85, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 2, 2]], #3/26/2011
(2011, 86, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 2, 2]], #3/27/2011
(2011, 87, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 5, 2]], #3/28/2011
(2011, 88, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 7, 2]], #3/29/2011
(2011, 89, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 4, 2]], #3/30/2011
(2011, 90, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 4, 2]], #3/31/2011
(2011, 91, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 15, 2]], #4/1/2011
(2011, 92, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 4, 2]], #4/2/2011
(2011, 97, 3) : [[-97.0318, 27.8304, 3, 15, 4], [-97.0318, 27.8304, 3, 3, 5], [-97.0318, 27.8304, 3, 1, 2]], #4/7/2011
(2011, 98, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 1, 2]], #4/8/2011
# (2011, 99, 3) : [[-97.0318, 27.8304, 3, 5, 4]],
# (2011, 100, 3) : [[-97.0318, 27.8304, 3, 11, 4]],
# (2011, 101, 3) : [[-97.0318, 27.8304, 3, 3, 4]],
# (2011, 102, 3) : [[-97.0318, 27.8304, 3, 6, 4]],
# (2011, 111, 3) : [[-97.0318, 27.8304, 3, 8, 4]],
# (2011, 113, 3) : [[-97.0318, 27.8304, 3, 14, 4]],
# (2011, 114, 3) : [[-97.0318, 27.8304, 3, 16, 4]], #4/24/2011
# (2011, 117, 3) : [[-97.0318, 27.8304, 3, 10, 4]],
# (2011, 121, 3) : [[-97.0318, 27.8304, 3, 5, 4]],
# (2011, 122, 3) : [[-97.0318, 27.8304, 3, 42, 4]],
# (2011, 123, 3) : [[-97.0318, 27.8304, 3, 9, 4]],
# (2011, 124, 3) : [[-97.0318, 27.8304, 3, 11, 4]],
# (2011, 125, 3) : [[-97.0318, 27.8304, 3, 9, 4]],
# (2011, 126, 3) : [[-97.0318, 27.8304, 3, 2, 4]],
# (2011, 130, 3) : [[-97.0318, 27.8304, 3, 5, 4], [-97.0318, 27.8304, 3, 3, 5]],
# (2011, 131, 3) : [[-97.0318, 27.8304, 3, 5, 4]],
# (2011, 133, 3) : [[-97.0318, 27.8304, 3, 5, 4]], #5/13/2011
# (2011, 146, 3) : [[-97.0318, 27.8304, 3, 7, 4]], #5/26/2011
# (2011, 147, 3) : [[-97.0318, 27.8304, 3, 2, 4], [-97.0318, 27.8304, 3, 2, 5]],
# (2011, 148, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2011, 149, 3) : [[-97.0318, 27.8304, 3, 3, 4], [-97.0318, 27.8304, 3, 5, 5]], #5/29/2011
# (2011, 150, 3) : [[-97.0318, 27.8304, 3, 4, 4], [-97.0318, 27.8304, 3, 3, 5]], #5/30/2011
# (2011, 153, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2011, 167, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
# (2011, 169, 3) : [[-97.0318, 27.8304, 3, 2, 5]],
# (2011, 181, 3) : [[-97.0318, 27.8304, 3, 3, 5]],
#
# (2011, 222, 3) : [[-89.013, 21.44, 1, 7, 0], [-89.05, 21.44, 1, 22, 0], [-89.11, 21.44, 1, 15, 0]], #8/10/2011 (from Yucatan, Mexico), #8/10/2011
# (2011, 232, 3) : [[-89.013, 21.44, 1, 7, 0], [-89.05, 21.44, 1, 22, 0], [-89.11, 21.44, 1, 15, 0]], #8/20/2011 (from Yucatan, Mexico), #8/20/2011
# (2011, 242, 3) : [[-89.013, 21.44, 1, 7, 0], [-89.05, 21.44, 1, 22, 0], [-89.11, 21.44, 1, 15, 0]], #8/30/2011 (from Yucatan, Mexico), #8/30/2011
# (2011, 258, 3) : [[-97.337806,25.974583,1,25, 0],[-97.332194,25.977028,1,25, 0],[-97.325,25.978333,1,25, 0],[-97.1575,26.0755556,1,7, 0],[-97.156331,26.068453,1,2, 0], [-90.56, 20.32, 1, 10, 0]], #9/15/2011, #9/15/2011
# (2011, 259, 3) : [[-97.1532,26.0687,1,3, 0],[-97.1575,26.0756,1,4, 0],[-97.1563,26.0685,1,2, 0],[-97.1516,26.0133,1,25, 0],[-97.1924,26.0506,1,25, 0],[-97.2363,26.0280,1,25, 0],[-97.1627,26.0666,1,25, 0],[-97.1619,26.0963,1,25, 0],[-97.1642,26.1114,1,25, 0],[-97.1682,26.1359,1,13, 0], [-91.243, 20.594, 1, 25, 0]], #9/16/2011, #9/16/2011
# (2011, 260, 3) : [[-97.1563,26.0685,1,25, 0],[-97.1575,26.0756,1,25, 0],[-97.2986,26.0021,1,25, 0],[-97.1841,26.2453,1,4, 0],[-97.1719,26.1653,1,25, 0],[-97.1776,26.2074,1,8, 0, 0]], #9/17/2011, #9/17/2011
# (2011, 261, 3) : [[-97.1516,26.0133,1,13, 0],[-97.1478,26.0642,1,13, 0],[-97.2986,26.0021,1,25, 0],[-97.1575,26.0756,1,20, 0]], #9/18/2011, #9/18/2011
# (2011, 262, 3) : [[-97.153183,26.049006,1,2, 0]], #9/19/2011, #9/19/2011
# (2011, 264, 3) : [[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,12, 0]], #9/21/2011 #9/21/2011
# (2011, 265, 3) : [[-97.207767,26.0428,1,25, 0],[-97.169517,26.10175,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,24, 0],[-95.387778,28.995833,1,25, 0],[-95.380833,28.864722,1,25, 0],[-95.253333,28.976944,1,25, 0, 0]], #9/22/2011, #9/22/2011
# (2011, 266, 3) : [[-97.1563,26.0685,1,20, 0],[-97.1575,26.0756,1,13, 0],[-91.243, 20.594, 1, 25, 0]], #9/23/2011, #9/23/2011
# (2011, 268, 3) : [[-97.4017,25.9512,1,25, 0],[-97.2986,26.0021,1,25, 0],[-97.1563,26.0685,1,18, 0],[-97.1575,26.0756,1,6, 0]], #9/25/2011, #9/25/2011
# (2011, 269, 3) : [[-82.3857,26.9632,1,4, 0],[-82.4131,27.0103,1,25, 0],[-82.4529,27.0783,1,3, 0],[-82.4609,27.1003,1,2, 0],[-97.401669,25.951203,1,25, 0]], #9/26/2011, #9/26/2011
# (2011, 270, 3) : [[-82.2742,26.8056,1,18, 0],[-82.3619,26.9247,1,37, 0],[-97.1636111,26.0694444,1,22, 0],[-97.1575,26.0755556,1,15, 0]], #9/27/2011, #9/27/2011
# (2011, 271, 3) : [[-82.5096,27.0896,1,25, 0],[-82.3857,26.9632,1,20, 0],[-82.4131,27.0103,1,25, 0],[-82.4509,27.0735,1,25, 0],[-82.4529,27.0783,1,25, 0],[-82.4609,27.1003,1,25, 0],[-97.1636111,26.0694444,1,25, 0],[-97.401669,25.951203,1,25, 0],[-97.28204,26.01221,1,25, 0],[-97.1575,26.0755556,1,6, 0],[-97.156331,26.068453,1,3, 0]], #9/28/2011, #9/28/2011
# (2011, 272, 3) : [[-82.5369,27.1228,1,25, 0],[-82.5640,27.1579,1,10, 0],[-82.5802,27.1993,1,25, 0],[-82.3162,26.7887,1,25, 0],[-82.2825,26.6941,1,7, 0],[-97.156331,26.068453,1,2, 0],[-97.1636111,26.0694444,1,3, 0],[-97.1575,26.0755556,1,11, 0]], #9/29/2011, #9/29/2011
# (2011, 275, 3) : [[-82.2943,26.6185,1,22, 0]], #10/2/2011, #10/2/2011
# (2011, 276, 3) : [[-82.3857,26.9632,1,25, 0],[-82.4131,27.0103,1,2, 0],[-97.1563,26.0685,1,4, 0],[-97.1575,26.0756,1,12, 0],[-95.1181,29.0856,1,25, 0],[-95.1217,29.0899,1,11, 0],[-95.9147,28.6372,1,2, 0]], #10/3/2011, #10/3/2011
# (2011, 277, 3) : [[-82.3417,26.8933,1,25, 0],[-82.2805,26.8162,1,25, 0],[-82.2575,26.7156,1,25, 0],[-82.2742,26.8056,1,25, 0],[-82.3619,26.9247,1,25, 0],[-82.2332,26.3608,3,3, 0],[-82.2332,26.3608,1,25, 0],[-97.3775,27.083056,1,25, 0],[-97.220216666,27.81306666,1,2, 0],[-97.1575,26.0755556,1,11, 0],[-97.156331,26.068453,1,6, 0],[-97.13943333,27.8233333,1,2, 0],[-95.44895,28.8683944,1,25, 0],[-95.12165,29.0899,1,25, 0],[-94.8773666,29.29105,1,4, 0],[-94.775,29.3230555,1,25, 0],[-94.75041666,29.33461666,1,4, 0]], #10/4/2011, #10/4/2011
# (2011, 278, 3) : [[-82.1939,26.5264,1,2, 0],[-82.0327,26.4633,1,4, 0],[-82.4201,26.9292,1,5, 0],[-82.3752,26.8484,1,25, 0],[-82.3753,26.8484,1,25, 0],[-82.3738,26.8484,10,25, 0],[-82.3706,26.8495,1,25, 0],[-97.1632,26.0687,1,2, 0],[-97.1563,26.0685,1,12, 0],[-97.1575,26.0756,1,6, 0],[-96.3153,28.4141,1,25, 0],[-96.4014,28.4425,1,25, 0],[-96.4122,28.4402,1,25, 0],[-96.6216,28.2965,1,7, 0]], #10/5/2011, #10/5/2011
# (2011, 279, 3) : [[-82.2781,26.7071,1,25, 0],[-82.3140,26.6800,3,25, 0],[-82.3140,26.6800,1,25, 0],[-82.3445,26.7024,7,25, 0],[-82.3445,26.7024,1,25, 0],[-82.3766,26.6999,7,25, 0],[-82.3766,26.6999,1,25, 0],[-82.3529,26.6654,7,25, 0],[-82.3529,26.6654,1,25, 0],[-82.3355,26.6348,7,25, 0],[-82.3355,26.6348,1,25, 0],[-82.3430,26.4823,7,25, 0],[-82.3430,26.4823,1,25, 0],[-82.3140,26.5896,5,25, 0],[-82.3140,26.5896,1,25, 0],[-82.2971,26.5129,5,25, 0],[-82.2971,26.5129,1,25, 0],[-82.2722,26.5191,5,25, 0],[-82.2722,26.5191,1,25, 0],[-82.2492,26.5306,4,25, 0],[-82.2492,26.5306,1,25, 0],[-82.2010,26.5541,1,25, 0],[-95.9827333,28.59421666,1,25, 0],[-97.379083,27.024333,1,18, 0],[-97.361667,26.85775,1,25, 0],[-97.323,26.702472,1,25, 0],[-97.276667,26.566742,1,25, 0],[-97.1575,26.0755556,1,5, 0],[-97.156331,26.068453,1,2, 0],[-96.403228,28.454197,1,25, 0],[-95.95696666,28.6931833,1,25, 0],[-95.6590333,28.7577666,1,25, 0],[-95.65855,28.7567166,1,25, 0],[-94.8773666,29.29105,1,2, 0],[-94.775,29.3230555,1,6, 0]], #10/6/2011, #10/6/2011
# (2011, 280, 3) : [[-97.1575,26.0755556,1,10, 0],[-97.156331,26.068453,1,11, 0],[-96.488422,28.510919,1,25, 0],[-96.418142,28.437414,1,25, 0, 0]], #10/7/2011, #10/7/2011
# (2011, 281, 3) : [], #10/8/2011, #10/8/2011
# (2011, 282, 3) : [[-97.0318,27.8304,3,25, 0],[-97.030156,27.830103,1,25, 0]], #10/9/2011, #10/9/2011
# (2011, 283, 3) : [[-97.0318,27.8304,8,25, 0],[-97.0318,27.8304,1,25, 0],[-96.6736,28.3141,1,25, 0]], #10/10/2011, #10/10/2011
# (2011, 284, 3) : [[-82.1895,26.6716,1,25, 0],[-82.1837,26.7045,1,25, 0],[-82.2575,26.7156,1,25, 0],[-82.2196,26.6798,1,25, 0],[-82.2203,26.6467,1,25, 0],[-82.2210,26.6092,1,25, 0],[-82.1846,26.5993,1,13, 0],[-82.1963,26.5535,1,6, 0],[-82.2742,26.8056,1,25, 0],[-82.3335,26.6956,11,25, 0],[-82.3335,26.2656,1,25, 0],[-82.3023,26.6160,11,25, 0],[-82.3023,26.6160,1,25, 0],[-82.4000,26.6509,15,25, 0],[-82.4000,26.6509,1,25, 0],[-82.4353,26.5797,19,25, 0],[-82.4353,26.5797,1,25, 0],[-82.5786,26.7003,1,25, 0],[-82.5786,26.7003,1,25, 0],[-82.4941,26.7353,17,25, 0],[-82.4941,26.7353,1,25, 0],[-82.4088,26.7575,14,25, 0],[-82.4088,26.7575,1,25, 0],[-82.3248,26.8029,9,10, 0],[-82.3248,26.8029,1,25, 0],[-82.4132,27.0155,1,5, 0],[-97.220216666,27.81306666,1,25, 0],[-97.17757,26.20738,1,6, 0],[-97.1575,26.0755556,1,7, 0],[-97.156331,26.068453,1,8, 0],[-97.13943333,27.8233333,1,25, 0],[-97.1061,27.83443333,1,25, 0],[-97.066666,27.83878333,1,25, 0],[-97.062417,27.841606,1,25, 0],[-97.052858,27.837867,1,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,3,25, 0],[-97.03666666,27.92388333,1,2, 0],[-97.02081666,27.82475,1,25, 0]], #10/11/2011, #10/11/2011
# (2011, 285, 3) : [[-82.3417,26.8933,1,14, 0],[-82.2805,26.8162,1,25, 0],[-82.2805,26.8162,1,25, 0],[-82.2524,26.8166,1,25, 0],[-82.2452,26.7876,1,25, 0],[-82.2333,26.7667,1,25, 0],[-82.2575,26.7156,1,25, 0],[-82.2145,26.7407,1,25, 0],[-82.2022,26.6091,1,25, 0],[-82.2075,26.6295,1,25, 0],[-82.2500,26.7183,1,25, 0],[-82.2493,26.7398,1,25, 0],[-82.1756,26.5448,1,7, 0],[-82.2060,26.5938,1,3, 0],[-97.1575,26.0755556,1,18, 0],[-97.156331,26.068453,1,6, 0],[-97.13577,27.82971666,1,25, 0],[-97.116908,27.882094,1,25, 0]], #10/12/2011, #10/12/2011
# (2011, 286, 3) : [[-82.1846,26.5993,1,3, 0],[-97.156331,26.068453,1,5, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,1,25, 0],[-96.561944,28.5955666,1,25, 0],[-96.5165,28.5664,1,25, 0],[-96.5067666,28.5693166,1,25, 0],[-96.488775,28.507942,1,5, 0],[-95.1181333,29.08556667,1,13, 0],[-94.91811666,29.2555333,1,4, 0],[-94.8824333,29.51276666,1,6, 0],[-94.8773666,29.29105,1,4, 0],[-94.82866666,29.417,1,15, 0],[-94.775,29.3230555,1,4, 0],[-94.76873333,29.34435,1,20, 0],[-94.7522222,29.3388888,1,24, 0],[-94.73113333,29.33953333,1,2, 0],[-94.70205,29.46563333,1,6, 0],[-94.6978,29.3323833,1,2, 0]], #10/13/2011, #10/13/2011
# (2011, 287, 3) : [[-82.2619,26.2946,7,7, 0],[-82.2619,26.2946,1,10, 0],[-82.1798,26.5678,1,2, 0],[-82.2153,26.3231,1,3, 0],[-82.3830,26.5975,1,25, 0],[-82.3785,26.6016,1,25, 0],[-82.3794,26.6123,1,25, 0],[-82.3759,26.5996,1,25, 0],[-82.3731,26.5973,1,25, 0],[-82.3728,26.5976,1,25, 0],[-82.3728,26.5668,1,25, 0],[-82.3757,26.5636,1,25, 0],[-82.3761,26.5486,1,25, 0],[-82.3745,26.4986,1,25, 0],[-82.3280,26.4337,1,25, 0],[-82.3283,26.3953,1,25, 0],[-82.2060,26.5938,1,7, 0],[-97.220216666,27.81306666,1,25, 0],[-97.13943333,27.8233333,1,25, 0],[-97.13441666,27.8977333,1,2, 0],[-97.066666,27.83878333,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,1,25, 0],[-97.04943333,27.8386,1,25, 0]], #10/14/2011, #10/14/2011
# (2011, 288, 3) : [[-97.20965,26.35323,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.170394,26.156644,1,25, 0],[-97.1575,26.0755556,1,2, 0]], #10/15/2011, #10/15/2011
# (2011, 289, 3) : [[-97.16856,26.14034,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.149928,26.063144,1,25, 0]], #10/16/2011, #10/16/2011
# (2011, 290, 3) : [[-82.1798,26.5678,1,25, 0],[-82.2060,26.5938,1,2, 0],[-82.1895,26.6716,1,18, 0],[-82.1846,26.5993,1,3, 0],[-82.3857,26.9632,1,24, 0],[-82.4131,27.0103,1,3, 0],[-97.1636111,26.0694444,1,25, 0],[-97.37869,27.061231,1,25, 0],[-97.301565,27.4148512,1,25, 0],[-97.279977,26.572839,1,25, 0],[-97.165769,26.119692,1,25, 0],[-97.160497,26.089992,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.066666,27.83878333,1,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.04943333,27.8386,1,2, 0]], #10/17/2011, #10/17/2011
# (2011, 291, 3) : [[-82.2742,26.8056,1,25, 0],[-82.3619,26.9247,1,25, 0],[-97.0318,27.8304,1,25, 0],[-97.1636111,26.0694444,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.1516333,26.0133167,1,4, 0],[-97.147825,26.064153,1,25, 0],[-97.052858,27.837867,1,25, 0],[-97.030156,27.830103,1,25, 0]], #10/18/2011, #10/18/2011
# (2011, 292, 3) : [[-97.270522,26.562508,1,25, 0],[-97.269558,26.561714,1,25, 0],[-97.212617,26.368847,1,25, 0],[-97.208569,26.064783,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #10/19/2011, #10/19/2011
# (2011, 293, 3) : [[-97.208569,26.064783,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.049433333,27.933333,1,11, 0],[-97.04741666,27.8386,1,25, 0],[-97.03666666,27.92388333,1,15, 0],[-97.0272,28.0013833,1,8, 0],[-96.9251833,28.1176,1,25, 0],[-96.85965,28.17745,1,25, 0]], #10/20/2011, #10/20/2011
# (2011, 294, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.16838333,26.1375194,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.1516333,26.0133167,1,25, 0],[-97.030156,27.830103,1,18, 0],[-97.030156,27.830103,8,14, 0]], #10/21/2011, #10/21/2011
# (2011, 295, 3) : [[-97.16838333,26.1375194,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #10/22/2011, #10/22/2011
# (2011, 296, 3) : [[-97.030156,27.830103,1,25, 0]], #10/23/2011, #10/23/2011
# (2011, 297, 3) : [[-97.030156,27.830103,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.17205833,26.12915833,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.03291666,28.0603333,1,25, 0],[-97.03145,28.0198,1,8, 0],[-96.274167,28.641667,1,25, 0],[-96.24915,28.64255,1,25, 0],[-96.2479333,28.65653333,1,25, 0],[-96.1336,28.59185,1,25, 0],[-95.95743333,28.6936333,1,25, 0],[-95.8296166,28.7026833,1,7, 0],[-95.65855,28.7567166,1,14, 0],[-95.1651333,29.04881667,1,25, 0],[-95.1181333,29.08556667,1,25, 0],[-94.8824333,29.51276666,1,25, 0],[-94.82866666,29.417,1,20, 0],[-94.80321666,29.3839,1,25, 0],[-94.80025,29.4908333,1,11, 0],[-94.775,29.3230555,1,25, 0],[-94.76873333,29.34435,1,25, 0],[-94.7522222,29.3388888,1,13, 0],[-94.73113333,29.33953333,1,8, 0],[-94.70205,29.46563333,1,25, 0],[-94.6978,29.3323833,1,25, 0]], #10/24/2011, #10/24/2011
# (2011, 298, 3) : [[-82.2587,26.7231,1,17],[-82.2702,26.8266,1,3, 0], [-97.38645,27.79965,1,25, 0], [-97.35207222,27.8493833,1,25, 0], [-97.31161944,27.8702,1,25, 0], [-97.306,27.71483333,1,6, 0], [-97.220216666,27.81306666,1,25, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.16315,27.75965,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.13943333,27.8233333,1,25, 0], [-97.1061,27.83443333,1,25, 0], [-96.620025,28.296675,1,25, 0], [-96.51831,28.33306,1,25, 0], [-96.418469,28.400386,1,25, 0], [-96.4013833,28.4425,1,25, 0], [-96.323578,28.642072,1,25, 0], [-96.323264,28.639361,1,25, 0]], #10/25/2011, #10/25/2011
# (2011, 299, 3) : [[-82.3053,26.6526,1,3, 0], [-82.2598,26.5436,8,17, 0], [-82.2598,26.5436,1,25, 0], [-82.2226,26.4318,10,25, 0], [-82.2226,26.4318,1,25, 0], [-82.2672,26.4036,11,25, 0], [-82.2672,26.4036,1,12, 0], [-82.3275,26.5079,12,25, 0], [-82.3275,26.5079,1,25, 0], [-82.3831,26.6142,1,13, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.030156,27.830103,1,25, 0], [-97.030156,27.830103,8,25, 0]], #10/26/2011, #10/26/2011
# (2011, 300, 3) : [[-82.1715,26.5545,1,8, 0], [-82.3481,26.3903,1,25],[-97.17757,26.20738,1,8, 0],[-97.1719444,26.1652778,1,11, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,16, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,3,25, 0],[-97.030156,27.830103,1,25, 0]], #10/27/2011, #10/27/2011
# (2011, 301, 3) : [[-82.2587,26.7231,1,12, 0],[-97.37869,27.061231,1,25, 0],[-97.30123,27.4148923,1,25, 0],[-97.279977,26.572839,1,25, 0],[-97.1575,26.0755556,1,20, 0],[-97.156331,26.068453,1,4, 0]], #10/28/2011, #10/28/2011
# (2011, 302, 3) : [[-97.17757,26.20738,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,7, 0]], #10/29/2011, #10/29/2011
# (2011, 304, 3) : [[-82.0294,26.4694,1,13, 0],[-97.17757,26.20738,1,25, 0],[-97.176106,26.140067,1,2, 0],[-97.1719444,26.1652778,1,25, 0],[-97.16665,26.12625,1,25, 0],[-97.160183,26.088517,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.05593333,28.13206666,1,4, 0],[-97.052858,27.837867,1,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,3,25, 0],[-97.049433333,27.933333,1,25, 0],[-97.04943333,27.8386,1,25, 0],[-97.03666666,27.92388333,1,25, 0],[-95.1651333,29.04881667,1,25, 0],[-95.1181333,29.08556667,1,25, 0],[-94.91811666,29.2555333,1,25, 0],[-94.8824333,29.51276666,1,25, 0],[-94.82866666,29.417,1,25, 0],[-94.80321666,29.3839,1,25, 0],[-94.80025,29.4908333,1,25, 0],[-94.775,29.3230555,1,25, 0],[-94.76873333,29.34435,1,25, 0],[-94.7522222,29.3388888,1,25, 0],[-94.73113333,29.33953333,1,25, 0],[-94.70205,29.46563333,1,25, 0],[-94.6978,29.3323833,1,25, 0]], #10/31/2011, #10/31/2011
# (2011, 305, 3) : [[-82.2742,26.8056,1,25, 0],[-97.38305,27.84005,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.1516333,26.0133167,1,25, 0],[-97.147825,26.064153,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,3,20, 0],[-96.315316666,28.41411667,1,25, 0]], #11/1/2011, #11/1/2011
# (2011, 306, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.37869,27.061231,1,25, 0],[-97.30123,27.4148923,1,10, 0],[-97.279977,26.572839,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.176106,26.140067,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.052858,27.837867,1,25, 0],[-97.030156,27.830103,8,25, 0],[-97.030156,27.830103,1,25, 0]], #11/2/2011, #11/2/2011
# (2011, 307, 3) : [[-82.0026,26.3737,1,11, 0],[-82.1762,26.3953,1,5, 0],[-82.1761,26.3655,1,19, 0],[-82.1721,26.3235,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #11/3/2011, #11/3/2011
# (2011, 308, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.17205833,26.12915833,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,8,25, 0]], #11/4/2011, #11/4/2011
# (2011, 309, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.160236,26.088614,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #11/5/2011, #11/5/2011
# (2011, 310, 3) : [[-82.5422,27.3075,1,8, 0],[-97.160236,26.088614,1,25, 0]], #11/6/2011, #11/6/2011
# (2011, 311, 3) : [[-81.7278,25.9733,1,3, 0],[-97.372352,27.150221,1,25, 0],[-97.30123,27.4148923,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.17205833,26.12915833,1,13, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.030156,27.830103,1,25, 0],[-97.030156,27.830103,8,25, 0],[-94.8824333,29.51276666,1,25, 0],[-94.82866666,29.417,1,25, 0],[-94.80321666,29.3839,1,25, 0],[-94.80025,29.4908333,1,25, 0],[-94.775,29.3230555,1,25, 0],[-94.76873333,29.34435,1,25, 0],[-94.7522222,29.3388888,1,25, 0],[-94.73113333,29.33953333,1,25, 0],[-94.70205,29.46563333,1,25, 0],[-94.6978,29.3323833,1,25, 0],[-97.00346666,28.00523333,1,25, 0]], #11/7/2011, #11/7/2011
# (2011, 312, 3) : [[-97.17757,26.20738,1,25, 0],[-97.1719444,26.1652778,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0],[-97.049433333,27.933333,1,25, 0],[-97.04943333,27.8386,1,25, 0]], #11/8/2011, #11/8/2011
# (2011, 313, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.17205833,26.12915833,1,19, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #11/9/2011, #11/9/2011
# (2011, 314, 3) : [[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #11/10/2011, #11/10/2011
# (2011, 315, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.17205833,26.12915833,1,2, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #11/11/2011, #11/11/2011
# (2011, 316, 3) : [[-97.1719444,26.1652778,1,25, 0],[-97.17757,26.20738,1,25, 0],[-97.1575,26.0755556,1,25, 0],[-97.156331,26.068453,1,25, 0]], #11/12/2011, #11/12/2011
# (2011, 318, 3) : [[-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.1516333,26.0133167,1,25, 0]], #11/14/2011, #11/14/2011
# (2011, 319, 3) : [[-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0]], #11/15/2011, #11/15/2011
# (2011, 320, 3) : [[-82.0154,26.4516,1,19, 0], [-97.17757,26.20738,1,25, 0], [-97.176106,26.140067,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0]], #11/16/2011, #11/16/2011
# (2011, 321, 3) : [[-81.8453,26.3301,1,25, 0], [-81.8239,26.2531,1,25, 0], [-81.8169,26.2073,1,8, 0], [-97.204864,26.066503,1,25, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0]], #11/17/2011, #11/17/2011
# (2011, 322, 3) : [[-97.204864,26.066503,1,22, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0]], #11/18/2011, #11/18/2011
# (2011, 325, 3) : [[-81.7171,25.9124,1,25, 0], [-81.7278,25.9733,1,25, 0], [-81.7281,25.9117,1,5, 0], [-81.7897,26.1428,1,19, 0], [-97.204864,26.066503,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.030156,27.830103,1,25, 0], [-97.030156,27.830103,8,25, 0]], #11/21/2011, #11/21/2011
# (2011, 326, 3) : [[-81.8457,26.3311,1,9, 0], [-81.8790,26.3905,1,6, 0], [-81.9578,26.4540,1,3, 0], [-82.0154,26.4516,1,25, 0], [-97.204864,26.066503,1,25, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0]], #11/22/2011, #11/22/2011
# (2011, 327, 3) : [[-81.9774,26.2033,13,25, 0], [-81.9774,26.2033,1,25, 0], [-82.0531,26.1373,13,25, 0], [-82.0531,26.1373,1,25, 0], [-82.0531,26.0553,15,25, 0], [-82.0531,26.0553,1,25, 0], [-82.1353,26.1605,17,25, 0], [-82.1353,26.1605,1,25, 0], [-82.1369,26.2590,7,25, 0], [-82.1369,26.2590,1,25, 0], [-82.1370,26.3556,13,25, 0], [-82.1370,26.3556,1,25, 0], [-82.0626,26.3683,8,25, 0], [-82.0626,26.3683,1,25, 0], [-81.9982,26.3870,6,25, 0], [-81.9982,26.3870,1,25, 0], [-97.204864,26.066503,1,9, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.030156,27.830103,1,25, 0], [-97.030156,27.830103,8,25, 0]], #11/23/2011, #11/23/2011
# (2011, 328, 3) : [[-82.2624,26.6070,1,5, 0]], #11/24/2011, #11/24/2011
# (2011, 330, 3) : [[-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,3, 0], [-97.1575,26.0755556,1,11, 0], [-97.156331,26.068453,1,25, 0]], #11/26/2011, #11/26/2011
# (2011, 331, 3) : [[-81.7883,26.1367,1,25, 0]], #11/27/2011, #11/27/2011
# (2011, 332, 3) : [[-81.9851,26.4074,1,25, 0], [-81.9648,26.4159,1,25, 0], [-81.9461,26.4103,1,25, 0], [-81.9466,26.3836,1,25, 0], [-81.9067,26.3900,1,25, 0], [-81.9224,26.3894,1,25, 0], [-81.9414,26.3874,1,25, 0], [-81.9574,26.3704,1,25, 0], [-81.9593,26.3904,1,25, 0], [-81.9788,26.3895,1,25, 0], [-81.9975,26.3883,1,25, 0], [-82.0160,26.3886,1,25, 0], [-82.0345,26.3877,1,25, 0], [-82.0536,26.3855,1,25, 0], [-82.0713,26.3855,1,25, 0], [-82.0906,26.3870,1,25, 0], [-82.1083,26.3882,1,25, 0], [-82.0924,26.3940,1,25, 0], [-82.0735,26.4014,1,25, 0], [-82.0586,26.4113,1,25, 0], [-82.0434,26.4210,1,25, 0], [-82.0299,26.4326,1,25, 0], [-82.0161,26.4442,1,25, 0], [-82.0020,26.4546,1,25, 0], [-81.7029,25.9804,1,25, 0], [-81.6496,25.9334,1,8, 0], [-81.7171,25.9124,1,25, 0], [-81.7278,25.9733,1,25, 0], [-81.8453,26.3301,1,25, 0], [-81.8239,26.2531,1,25, 0], [-81.8169,26.2073,1,25, 0], [-81.8063,26.1316,1,25, 0], [-81.7281,25.9117,1,25, 0], [-82.0813,26.4904,1,25, 0], [-97.204864,26.066503,1,25, 0], [-97.37869,27.061231,1,25, 0], [-97.30123,27.4148923,1,25, 0], [-97.279977,26.572839,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.1516333,26.0133167,1,8, 0]], #11/28/2011, #11/28/2011
# (2011, 333, 3) : [[-82.0444,26.5090,1,25, 0], [-81.9835,26.5136,1,21, 0], [-82.0642,26.4688,1,25, 0], [-82.1143,26.4834,1,25, 0], [-82.2203,26.6467,1,2, 0], [-82.1963,26.5535,1,25, 0], [-82.1752,26.4942,1,6, 0]], #11/29/2011, #11/29/2011
# (2011, 334, 3) : [[-97.204864,26.066503,1,25, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0], [-97.030156,27.830103,8,25, 0], [-97.030156,27.830103,1,25, 0]], #11/30/2011, #11/30/2011
# (2011, 335, 3) : [[-81.7281,25.9117,1,25, 0], [-81.8063,26.1316,1,25, 0], [-81.8239,26.2531,1,2, 0], [-81.8453,26.3301,1,8, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0]], #12/1/2011, #12/1/2011
# (2011, 336, 3) : [[-97.204864,26.066503,1,25, 0], [-97.17757,26.20738,1,25, 0], [-97.1719444,26.1652778,1,25, 0], [-97.1575,26.0755556,1,25, 0], [-97.156331,26.068453,1,25, 0]], #12/2/2011, #12/2/2011
# (2011, 338, 3) : [[-97.1632,26.0687,1,25, 0], [-97.1575,26.0756,1,25, 0]], #12/4/2011, #12/4/2011
# (2011, 339, 3) : [[-82.1752,26.4942,1,25, 0], [-82.1143,26.4834,1,25, 0], [-82.0272,26.4525,1,4, 0], [-81.9719,26.2706,1,3, 0], [-81.8437,26.1452,1,25, 0], [-81.8894,26.2285,8,25, 0], [-81.8894,26.2285,1,25, 0], [-81.9052,26.2577,1,25, 0], [-81.9918,26.4287,1,25, 0], [-82.1826,26.4827,1,25, 0], [-81.7171,25.9124,1,25, 0], [-81.8239,26.2531,1,9, 0], [-81.7281,25.9117,1,25, 0], [-81.7278,25.9733,1,25, 0], [-81.6496,25.9334,1,25, 0], [-81.7029,25.9804,1,25, 0]], #12/5/2011, #12/5/2011
# (2011, 340, 3) : [[-81.8445,26.3550,1,6, 0], [-81.8463,26.3859,1,2, 0], [-81.9114,26.4349,1,25, 0], [-81.6070,25.9038,1,25, 0], [-81.6323,25.8815,1,25, 0], [-81.4735,25.6161,1,25, 0], [-81.4241,25.6379,1,25, 0], [-81.3856,25.6614,1,21, 0]], #12/6/2011, #12/6/2011
# (2011, 341, 3) : [[-82.1939,26.5264,1,25, 0], [-82.0251,26.4429,1,25, 0], [-82.0212,26.4475,1,25, 0], [-81.8457,26.3311,1,25, 0], [-81.8790,26.3905,1,25, 0], [-81.9578,26.4540,1,25, 0], [-82.0154,26.4516,1,25, 0], [-81.4392,24.9128,1,3, 0]], #12/7/2011, #12/7/2011
# (2011, 342, 3) : [[-81.7861,26.1155,1,25, 0], [-81.7281,25.9117,1,25, 0], [-81.8063,26.1316,1,25, 0], [-81.8169,26.2073,1,25, 0], [-81.8239,26.2531,1,25, 0], [-81.8453,26.3301,1,19, 0], [-82.0829,26.4424,1,25, 0], [-82.0829,26.4424,1,25, 0], [-81.7296,25.9206,1,25, 0], [-81.7171,25.9129,1,25, 0]], #12/8/2011, #12/8/2011
# (2011, 343, 3) : [[-82.0765,26.4131,1,25, 0], [-82.1122,26.4180,1,25, 0], [-82.1238,26.4223,1,25, 0], [-82.1304,26.4260,1,25, 0], [-82.1367,26.4318,1,25, 0], [-82.1324,26.4378,1,25, 0], [-82.1244,26.4327,1,25, 0], [-82.1058,26.4249,1,25, 0], [-82.0870,26.4196,1,25, 0], [-82.0775,26.4196,1,25, 0], [-82.0557,26.4265,1,25, 0], [-82.0487,26.4297,1,25, 0], [-82.0352,26.4374,1,25, 0], [-82.0247,26.4431,1,25, 0], [-82.0240,26.4435,1,25, 0], [-82.0174,26.4489,1,25, 0], [-81.9898,26.4576,1,25, 0]], #12/9/2011, #12/9/2011
# (2011, 345, 3) : [[-80.7565,24.8202,1,25, 0], [-82.0629,26.5076,1,6, 0], [-82.0279,26.4301,1,25, 0], [-82.0355,26.4778,1,5, 0], [-81.9830,26.4774,1,18, 0]], #12/11/2011, #12/11/2011
# (2011, 346, 3) : [[-81.6496,25.9334,1,25, 0], [-81.7029,25.9804,1,25, 0], [-81.7171,25.9124,1,25, 0], [-81.7278,25.9733,1,25, 0], [-81.0304,24.7006,1,4, 0], [-81.0304,24.7006,1,22, 0], [-80.8333,24.7127,1,25, 0], [-80.8632,24.4538,1,25, 0], [-80.6918,24.7161,1,25, 0], [-80.6918,24.7161,1,7, 0], [-80.6918,24.7161,1,6, 0], [-80.7249,24.7629,1,19, 0], [-81.2042,24.6734,1,25, 0], [-81.4220,24.6047,1,25, 0]], #12/12/2011, #12/12/2011
# (2011, 347, 3) : [[-81.7281,25.9117,1,25, 0]], #12/13/2011, #12/13/2011
# (2011, 348, 3) : [[-81.7168,25.9125,1,25, 0], [-82.0154,26.4516,1,25, 0], [-82.0801,26.4231,1,7, 0]], #12/14/2011, #12/14/2011
# (2011, 349, 3) : [[-81.7281,25.9117,1,22, 0], [-81.8063,26.1316,1,11, 0]], #12/15/2011, #12/15/2011
# (2011, 351, 3) : [[-81.7879,26.0948,1,2, 0], [-82.0135,26.4487,1,25, 0], [-82.0771,26.4076,1,25, 0], [-82.0644,26.4042,1,25, 0], [-82.0236,26.4425,1,25, 0], [-82.0488,26.4279,1,25, 0], [-82.0732,26.4199,1,14, 0], [-82.0097,26.4534,1,25, 0]], #12/17/2011, #12/17/2011
# (2011, 353, 3) : [[-81.7029,25.9804,1,25, 0], [-81.6496,25.9334,1,25, 0], [-81.7171,25.9124,1,25, 0], [-81.7281,25.9117,1,25, 0]], #12/19/2011, #12/19/2011
# (2011, 354, 3) : [[-81.6419,25.9212,1,5, 0], [-81.6773,25.8463,1,25, 0]], #12/20/2011, #12/20/2011
# (2011, 355, 3) : [[-82.0408,26.4292,1,25, 0], [-82.0190,26.4442,1,25, 0], [-82.0289,26.4362,1,25, 0], [-82.0287,26.4155,1,25, 0], [-82.0617,26.4201,1,25, 0], [-82.0456,26.4079,1,25, 0], [-82.0666,26.4088,1,25, 0], [-82.0087,26.4516,1,25, 0], [-82.0154,26.4516,1,25, 0], [-82.0801,26.4231,1,25, 0], [-82.1939,26.5264,1,25, 0], [-81.4735,25.6161,1,10, 0], [-81.4241,25.6379,1,17, 0], [-81.3856,25.6614,1,24, 0], [-81.3579,25.6817,1,25, 0]], #12/21/2011, #12/21/2011
# (2011, 356, 3) : [[-81.9575,26.4604,1,25, 0], [-81.7168,25.9125,1,25, 0]], #12/22/2011, #12/22/2011
# (2011, 358, 3) : [[-82.1479,26.3092,1,25, 0], [-82.0135,26.4449,1,25, 0], [-82.0066,26.5233,1,25, 0]], #12/24/2011, #12/24/2011
# (2011, 360, 3) : [[-81.7933,26.1063,1,25, 0], [-81.9996,26.4163,1,25, 0], [-81.9894,26.4172,1,25, 0], [-81.9838,26.4189,1,25, 0], [-82.0112,26.4069,1,25, 0], [-82.0114,26.4142,1,25, 0], [-82.0180,26.4183,1,25, 0], [-82.0200,26.4190,1,25, 0], [-82.0213,26.4187,1,25, 0], [-82.0103,26.4242,1,25, 0], [-82.0114,26.4095,1,25, 0], [-82.0103,26.4283,1,25, 0], [-82.0129,26.4306,1,25, 0], [-82.0074,26.4549,1,25, 0], [-82.0125,26.4602,1,25, 0]], #12/26/2011, #12/26/2011
# (2011, 361, 3) : [[-82.0816,26.4909,1,25, 0], [-81.7281,25.9117,1,25, 0], [-81.8063,26.1316,1,25, 0], [-81.8369,26.2073,1,5, 0], [-81.8439,26.2531,1,25, 0], [-81.8653,26.3301,1,25, 0]], #12/27/2011, #12/27/2011
#
(2012, 33, 3) : [[-97.0318, 27.8304, 3, 0, 1]], #2/2/2012
(2012, 34, 3) : [[-97.0318, 27.8304, 3, 0, 1]], #2/3/2012
(2012, 35, 3) : [[-97.0318, 27.8304, 3, 0, 1]], #2/4/2012
(2012, 36, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #2/5/2012
(2012, 37, 3) : [[-97.0318, 27.8304, 3, 3, 1]],
(2012, 38, 3) : [[-97.0318, 27.8304, 3, 4, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2012, 39, 3) : [[-97.0318, 27.8304, 3, 12, 1], [-97.0318, 27.8304, 3, 3, 2]], #2/8/2012
(2012, 40, 3) : [[-97.0318, 27.8304, 3, 7, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2012, 41, 3) : [[-97.0318, 27.8304, 3, 11, 1], [-97.0318, 27.8304, 3, 4, 2]], #2/10/2012
(2012, 42, 3) : [[-97.0318, 27.8304, 3, 5, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2012, 43, 3) : [[-97.0318, 27.8304, 3, 4, 1], [-97.0318, 27.8304, 3, 1, 2]], #2/12/2012
(2012, 44, 3) : [[-97.0318, 27.8304, 3, 6, 1], [-97.0318, 27.8304, 3, 3, 2]],
(2012, 45, 3) : [[-97.0318, 27.8304, 3, 7, 1], [-97.0318, 27.8304, 3, 1, 2]], #2/14/2012
(2012, 46, 3) : [[-97.0318, 27.8304, 3, 8, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2012, 47, 3) : [[-97.0318, 27.8304, 3, 10, 1], [-97.0318, 27.8304, 3, 1, 2]], #2/16/2012
(2012, 48, 3) : [[-97.0318, 27.8304, 3, 6, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2012, 49, 3) : [[-97.0318, 27.8304, 3, 5, 1], [-97.0318, 27.8304, 3, 3, 2]],
(2012, 50, 3) : [[-97.0318, 27.8304, 3, 3, 1], [-97.0318, 27.8304, 3, 1, 2]], #2/19/2012
(2012, 51, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 2, 2]],
(2012, 52, 3) : [[-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 3, 2]], #2/21/2012
(2012, 53, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 7, 2]],
(2012, 54, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 4, 2]], #2/23/2012
(2012, 55, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 3, 2]],
(2012, 56, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 3, 2]], #2/25/2012
(2012, 57, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 2, 2]],
(2012, 58, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 2, 2]], #2/27/2012
(2012, 59, 3) : [[-97.0318, 27.8304, 3, 0, 1]],
(2012, 60, 3) : [[-97.0318, 27.8304, 3, 0, 1]], #2/29/2012
(2012, 61, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/1/2012
(2012, 62, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/2/2012
(2012, 63, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/3/2012
(2012, 64, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/4/2012
(2012, 65, 3) : [[-97.0318, 27.8304, 3, 0, 2]], #3/5/2012
(2012, 66, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/6/2012
(2012, 67, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #3/7/2012
(2012, 68, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #3/8/2012
(2012, 69, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/9/2012
(2012, 70, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #3/10/2012
(2013, 103, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #4/13/2013
(2013, 104, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #4/14/2013
(2013, 106, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #4/16/2013
(2013, 110, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #4/20/2013
(2013, 111, 3) : [[-97.0318, 27.8304, 3, 5, 2]], #4/21/2013
(2013, 112, 3) : [[-97.0318, 27.8304, 3, 8, 2]], #4/22/2013
(2013, 113, 3) : [[-97.0318, 27.8304, 3, 6, 2]], #4/23/2013
(2013, 114, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #4/24/2013
(2013, 120, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #4/30/2013
#
# (2013, 123, 3) : [[-97.0318, 27.8304, 3, 7, 2]], #5/03/2013
# (2013, 124, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #5/04/2013
#
# (2013, 139, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #5/19/2013
# (2013, 140, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #5/20/2013
# (2013, 141, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #5/21/2013
#
# (2013, 154, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #6/03/2013
# (2013, 155, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #6/04/2013
# (2013, 156, 3) : [[-97.0318, 27.8304, 3, 4, 2]], #6/05/2013
# (2013, 157, 3) : [[-97.0318, 27.8304, 3, 6, 2]], #6/06/2013
# (2013, 158, 3) : [[-97.0318, 27.8304, 3, 18, 2]], #6/07/2013
# (2013, 159, 3) : [[-97.0318, 27.8304, 3, 13, 2]], #6/08/2013
# (2013, 160, 3) : [[-97.0318, 27.8304, 3, 5, 2]], #6/09/2013
# (2013, 161, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #6/10/2013
# (2013, 162, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #6/11/2013
# (2013, 163, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #6/12/2013
# (2013, 165, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #6/14/2013
# (2013, 166, 3) : [[-97.0318, 27.8304, 3, 5, 2]], #6/15/2013
# (2013, 167, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #6/16/2013
#
# (2013, 175, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #6/24/2013
# (2013, 176, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #6/25/2013
# (2013, 177, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #6/26/2013
# (2013, 178, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #6/29/2013
#
#
# (2013, 240, 3) : [[-95.26, 28.91, 1, 25, 0], [-95.275, 28.899, 1, 25, 0], [-95.20, 28.875, 1, 25, 0]], #8/28/2013
# (2013, 255, 3) : [[-97.0318, 27.8304, 3, 31, 0]], #9/12/2013
# (2013, 256, 3) : [[-97.0318, 27.8304, 3, 4, 0]], #9/13/2013
# (2013, 257, 3) : [[-97.0318, 27.8304, 3, 1, 0]], #9/14/2013
# (2013, 258, 3) : [[-97.0318, 27.8304, 3, 1, 0]], #9/15/2013
# (2013, 259, 3) : [[-97.0318, 27.8304, 3, 1, 0]], #9/16/2013
# (2013, 260, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/17/2013
#
# (2013, 335, 3) : [[-95.287, 28.929, 1, 50, 0], [-97.0318, 27.8304, 1, 50, 0]],
#
(2014, 61, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #3/2/2014
(2014, 62, 3) : [[-97.0318, 27.8304, 3, 1, 1]], #3/3/2014
(2014, 63, 3) : [[-97.0318, 27.8304, 3, 0, 1]], #3/4/2014
(2014, 64, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/5/2014
(2014, 65, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 66, 3) : [[-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 67, 3) : [[-97.0318, 27.8304, 3, 3, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/8/2014
(2014, 68, 3) : [[-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 69, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/10/2014
(2014, 70, 3) : [[-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 71, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/12/2014
(2014, 72, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 73, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/14/2014
(2014, 74, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 76, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/17/2014
(2014, 77, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 78, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 1, 2]],
(2014, 79, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/20/2014
(2014, 80, 3) : [[-97.0318, 27.8304, 3, 0, 1]],
(2014, 81, 3) : [[-97.0318, 27.8304, 3, 0, 1]], #3/22/2014
(2014, 82, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 2, 2]],
(2014, 83, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 2, 2]], #3/24/2014
(2014, 84, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 3, 2]],
(2014, 85, 3) : [[-97.0318, 27.8304, 3, 0, 1], [-97.0318, 27.8304, 3, 1, 2]], #3/26/2014
(2014, 86, 3) : [[-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 2, 2]],
(2014, 87, 3) : [[-97.0318, 27.8304, 3, 1, 1], [-97.0318, 27.8304, 3, 2, 2]], #3/28/2014
(2014, 88, 3) : [[-97.0318, 27.8304, 3, 2, 1], [-97.0318, 27.8304, 3, 2, 2]],
(2014, 89, 3) : [[-97.0318, 27.8304, 3, 5, 1], [-97.0318, 27.8304, 3, 6, 2]], #3/30/2014
(2014, 90, 3) : [[-97.0318, 27.8304, 3, 20, 1], [-97.0318, 27.8304, 3, 11, 2]],
(2014, 91, 3) : [[-97.0318, 27.8304, 3, 20, 1], [-97.0318, 27.8304, 3, 16, 2]], #4/1/2014
(2014, 92, 3) : [[-97.0318, 27.8304, 3, 17, 1], [-97.0318, 27.8304, 3, 19, 2]], #4/2/2014
(2014, 93, 3) : [[-97.0318, 27.8304, 3, 18, 1], [-97.0318, 27.8304, 3, 23, 2]],
(2014, 94, 3) : [[-97.0318, 27.8304, 3, 44, 1], [-97.0318, 27.8304, 3, 36, 2]], #4/4/2014
(2014, 95, 3) : [[-97.0318, 27.8304, 3, 28, 1], [-97.0318, 27.8304, 3, 29, 2]],
(2014, 96, 3) : [[-97.0318, 27.8304, 3, 14, 1], [-97.0318, 27.8304, 3, 33, 2]], #4/6/2014
(2014, 97, 3) : [[-97.0318, 27.8304, 3, 9, 1], [-97.0318, 27.8304, 3, 23, 2]], #4/7/2014
(2014, 98, 3) : [[-97.0318, 27.8304, 3, 11, 2]], #4/8/2014
(2014, 99, 3) : [[-97.0318, 27.8304, 3, 6, 2]], #4/9/2014
(2014, 100, 3) : [[-97.0318, 27.8304, 3, 18, 2]], #4/10/2014
(2014, 101, 3) : [[-97.0318, 27.8304, 3, 17, 2]], #4/11/2014
(2014, 102, 3) : [[-97.0318, 27.8304, 3, 16, 2]], #4/12/2014
(2014, 103, 3) : [[-97.0318, 27.8304, 3, 11, 2]], #4/13/2014
(2014, 104, 3) : [[-97.0318, 27.8304, 3, 5, 2]], #4/14/2014
(2014, 105, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #4/15/2014
(2014, 106, 3) : [[-97.0318, 27.8304, 3, 3, 2]], #4/16/2014
(2014, 107, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #4/17/2014
(2014, 108, 3) : [[-97.0318, 27.8304, 3, 1, 2]], #4/18/2014
(2014, 109, 3) : [[-97.0318, 27.8304, 3, 2, 2]], #4/19/2014
(2014, 110, 3) : [[-97.0318, 27.8304, 3, 4, 2]], #4/20/2014
#
#
# (2014, 259, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/16/2014
# (2014, 260, 3) : [[-97.0318, 27.8304, 3, 2, 0]],
# (2014, 261, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/18/2014
# (2014, 262, 3) : [[-97.0318, 27.8304, 3, 2, 0]],
# (2014, 263, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/20/2014
# (2014, 264, 3) : [[-97.0318, 27.8304, 3, 2, 0]],
# (2014, 265, 3) : [[-97.0318, 27.8304, 3, 4, 0]], #9/22/2014
# (2014, 266, 3) : [[-97.0318, 27.8304, 3, 3, 0]],
# (2014, 267, 3) : [[-97.0318, 27.8304, 3, 12, 0]], #9/24/2014
# (2014, 268, 3) : [[-97.0318, 27.8304, 3, 12, 0]],
# (2014, 269, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/26/2014
# (2014, 270, 3) : [[-97.0318, 27.8304, 3, 2, 0]],
# (2014, 271, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/28/2014
# (2014, 272, 3) : [[-97.0318, 27.8304, 3, 2, 0]],
# (2014, 273, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #9/30/2014
# (2014, 274, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #10/1/2014
# (2014, 275, 3) : [[-97.0318, 27.8304, 3, 2, 0]], #10/2/2014
#
#
#
# ####2015
#
# #(2015, 193, 3) : [[-89.0026, 21.4990, 1, 100, 0], [-89.4071, 21.5074, 1, 100, 0], [-89.2029, 21.7014, 1, 100, 0], [-90.6804, 20.0064, 1, 100, 0], [-90.6443, 20.2276, 1, 100, 0]], #7/12/2015 Used for testing Shelly Tomlinson's data showing Kbrevis at Yucatan
# (2015, 257, 3) : [[-97.2665, 27.4915, 1, 270, 0], [-97.2164, 27.5809, 1, 672, 0], [-97.1993, 27.6144, 1, 85, 0], [-97.0505, 27.8258, 1, 301, 0]], #9/14/2015
# (2015, 258, 3) : [[-97.1651, 26.1197, 1, 3, 0], [-97.3715, 27.1561, 1, 400, 0], [-97.3479, 27.2763, 1, 660, 0], [-97.3268, 27.3467, 1, 510, 0], [-97.3012, 27.4152, 1, 470, 0], [-97.2665, 27.4915, 1, 360, 0]], #9/15/2015
# (2015, 259, 3) : [[-97.2164, 27.5809, 1, 1001, 0], [-97.0499, 27.8745, 1, 344, 0]], #9/16/2015
# (2015, 260, 3) : [[-97.1628, 26.0689, 1, 2, 0], [-97.1568, 26.0754, 1, 1, 0], [-97.0492, 27.8391, 1, 271, 0]], #9/17/2015
#
# #(2015, 264, 3) : [[-97.1542, 26.0674, 1, 2, 0], [-97.1628, 26.0689, 1, 1, 0], [-97.1559, 26.0706, 1, 4, 0], [-97.2701, 26.5620, 1, 555, 0], [-97.3094, 27.4733, 1, 5, 0], [-96.3148, 28.4137, 1, 1044, 0]], #9/21/2015
# #(2015, 265, 3) : [[-97.1477, 25.9691, 1, 2, 0], [-97.1512, 26.0599, 1, 82, 0], [-97.1542, 26.067, 1, 255, 0], [-97.1568, 26.0754, 1, 98, 0], [-97.1718, 26.1653, 1, 675, 0], [-97.1778, 26.2074, 1, 1316, 0], [-97.372, 27.149, 1, 8250, 0], [-97.3479, 27.276, 1, 2250, 0], [-97.3012, 27.4152, 1, 11200, 0], [-97.266, 27.4915, 1, 1717, 0], [-97.049, 27.839, 1, 13, 0]], #9/22/2015
#
# #(2015, 269, 3) : [[-97.1718, 26.1653, 1, 8118, 0], [-97.1778, 26.207, 1, 2025, 0]], #9/26/2015
# #(2015, 271, 3) : [[-97.301, 27.415, 1, 48, 0], [-97.313, 27.466, 1, 428, 0], [-97.309, 27.473, 1, 1024, 0], [-97.266, 27.491, 1, 72, 0]], #9/28/2015
#
# #(2015, 274, 3) : [[-97.156, 26.075, 1, 10010, 0], [-97.175, 26.126, 1, 33, 0], [-97.171, 26.165, 1, 15548, 0], [-97.177, 26.207, 1, 20592, 0], [-97.2919, 26.6049, 1, 5005, 0], [-97.343, 26.773, 1, 1340, 0], [-97.378, 26.988, 1, 176, 0], [-97.367, 27.191, 1, 92, 0], [-97.301, 27.415, 1, 56, 0]], #10/1/2015
# #(2015, 278, 3) : [[-97.156, 26.075, 1, 28392, 0], [-97.171, 26.165, 1, 50000, 0], [-97.177, 26.207, 1, 50000, 0], [-97.343, 26.773, 1, 2330, 0], [-97.378, 26.988, 1, 608, 0], [-97.367, 27.191, 1, 1096, 0], [-97.301, 27.415, 1, 52, 0], [-97.309, 27.473, 1, 108, 0]], #10/5/2015
#
# #####2016
# (2016, 239, 3) : [[-97.2164, 27.5809, 1, 2, 0], [-97.1608, 26.078, 1, 5, 0]], #08/26/2016
# (2016, 242, 3) : [[-97.1608, 26.078, 1, 1, 0], [-97.1719, 26.1652, 1, 1, 0], [-97.1775, 26.2085, 1, 1, 0]], #08/29/2016
# (2016, 250, 3) : [[-97.3017, 27.413, 1, 190, 0], [-97.3722, 27.1489, 1, 300, 0], [-97.3789, 27.0608, 1, 170, 0]], #09/06/2016
# (2016, 251, 3) : [[-97.2106, 27.5835, 1, 55, 0], [-97.2976, 27.4237, 1, 360, 0]], #09/07/2016
# (2016, 252, 3) : [[-97.1628, 26.0689, 1, 946, 0], [-97.1608, 26.0783, 1, 1386, 0], [-97.1542, 26.0674, 1, 746, 0]], #09/08/2016
# (2016, 253, 3) : [[-97.1542, 26.0674, 1, 1160, 0], [-97.1608, 26.0783, 1, 364, 0], [-97.1719, 26.1652, 1, 80, 0], [-97.1775, 26.2085, 1, 365, 0]], #09/09/2016
# (2016, 256, 3) : [[-97.1632, 26.0687, 1, 7800, 0], [-97.1576, 26.0691, 1, 550, 0], [-97.1569, 26.0751, 1, 378, 0], [-97.2761, 26.5648, 1, 110, 0], [-97.2766, 26.5652, 1, 120, 0], [-97.3232, 26.7028, 1, 90, 0], [-97.3594, 26.8444, 1, 70, 0], [-97.3780, 26.9885, 1, 60, 0], [-97.374, 27.1334, 1, 70, 0], [-97.3477, 27.2767, 1, 50, 0], [-97.3013, 27.4149, 1, 30, 0], [-97.2976, 27.424, 1, 60, 0], [-97.1501, 25.9930, 1, 30, 0], [-97.1778, 26.2074, 1, 1199, 0], [-97.1733, 26.1652, 1, 155, 0], [-97.1752, 26.1428, 1, 27, 0], [-97.1656, 26.076, 1, 4, 0], [-97.1493, 26.0643, 1, 398, 0]], #09/12/2016
# (2016, 257, 3) : [[-97.1778, 26.2074, 1, 913, 0], [-97.1631, 26.0687, 1, 41600, 0], [-97.1569, 26.0750, 1, 675, 0], [-97.1542, 26.0674, 1, 13380, 0], [-97.1605, 26.0886, 1, 347, 0], [-97.1655, 26.1191, 1, 422, 0], [-97.1772, 26.1376, 1, 1684, 0]], #09/13/2016
# (2016, 258, 3) : [[-97.1733, 26.1652, 1, 1683, 0], [-97.1775, 26.2085, 1, 1291, 0], [-97.1631, 26.0687, 1, 12170, 0], [-97.1542, 26.0674, 1, 2910, 0], [-97.1569, 26.0750, 1, 385, 0]], #09/14/2016
#
##fake data for testing the model
## (2009, 267, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]], #10/1/2009
## (2009, 271, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]], #10/1/2009
## (2009, 274, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]], #10/1/2009
## (2009, 278, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 281, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 285, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 288, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 292, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 295, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 299, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 302, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 306, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 309, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 313, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 316, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 320, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 323, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 327, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 330, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 334, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2009, 337, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
##
## (2010, 267, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]], #10/1/2010
## (2010, 271, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]], #10/1/2010
## (2010, 274, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]], #10/1/2010
## (2010, 278, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 281, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 285, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 288, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 292, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 295, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 299, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 302, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 306, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 309, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 313, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 316, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 320, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 323, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 327, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 330, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 334, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 337, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 341, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 344, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
## (2010, 347, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5, 0]],
##
## (2011, 267, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]], #10/1/2009
## (2011, 271, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]], #10/1/2009
## (2011, 274, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]], #10/1/2009
## (2011, 278, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 281, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 285, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 288, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 292, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 295, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 299, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 302, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 306, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 309, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 313, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 316, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 320, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 323, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 327, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 330, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 334, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## (2011, 337, 3) : [[-97.0318, 27.8304, 3, 20, 0], [-97.05, 26.078, 1, 20, 0], [-94.66, 29.28, 1, 20, 0], [-82.3311, 26.3656, 1, 5, 0], [-82.3311, 26.3656, 10, 5, 0], [-82.5, 27.00, 1, 5, 0], [-82.5, 27.00, 5, 5, 0], [-82.75, 27.4, 1, 5, 0], [-82.75, 27.4, 5, 5], 0]],
## ####end of fake data for model testing of Kbrevis
####fake data for model testing of Ptexanum
# (2010, 74, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 76, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 78, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 80, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 82, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 84, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 86, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 88, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 90, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 92, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 94, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 96, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 98, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 100, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 102, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 104, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 106, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 108, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2010, 110, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
#
# (2011, 74, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 76, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 78, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 80, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 82, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 84, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 86, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 88, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 90, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 92, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 94, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 96, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 98, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2011, 100, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# #(2011, 102, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# #(2011, 104, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# #(2011, 106, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# #(2011, 108, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# #(2011, 110, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
#
# (2012, 74, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 76, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 78, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 80, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 82, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 84, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 86, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 88, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 90, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 92, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 94, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 96, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 98, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 100, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 102, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 104, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 106, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 108, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2012, 110, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
#
# (2013, 84, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 86, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 88, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 90, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 92, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 94, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 96, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 98, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 100, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 102, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 104, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 106, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 108, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 110, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 112, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 114, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 116, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 118, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2013, 120, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
#
# (2014, 74, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 76, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 78, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 80, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 82, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 84, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 86, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 88, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 90, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 92, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 94, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 96, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 98, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 100, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 102, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 104, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 106, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 108, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
# (2014, 110, 3) : [[-97.0318, 27.8304, 3, 50, 2],],
#####end of fake data for model testing ptexanum
}
| 137.831263
| 1,249
| 0.414488
| 24,178
| 133,145
| 2.282405
| 0.062412
| 0.131777
| 0.175703
| 0.263555
| 0.755998
| 0.73958
| 0.67226
| 0.637395
| 0.576055
| 0.448645
| 0
| 0.590612
| 0.306193
| 133,145
| 965
| 1,250
| 137.974093
| 0.006766
| 0.788862
| 0
| 0
| 0
| 0
| 0.002315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e1d5eba61b5c7286f6f676517ecfc9c934004230
| 5,612
|
py
|
Python
|
tests/router/strategies.py
|
brettcannon/vibora
|
1933b631d4df62e7d748016f7463ab746d4695cc
|
[
"MIT"
] | 6,238
|
2018-06-14T19:29:47.000Z
|
2022-03-29T21:42:03.000Z
|
tests/router/strategies.py
|
LL816/vibora
|
4cda888f89aec6bfb2541ee53548ae1bf50fbf1b
|
[
"MIT"
] | 213
|
2018-06-13T20:13:59.000Z
|
2022-03-26T07:46:49.000Z
|
tests/router/strategies.py
|
LL816/vibora
|
4cda888f89aec6bfb2541ee53548ae1bf50fbf1b
|
[
"MIT"
] | 422
|
2018-06-20T01:29:41.000Z
|
2022-02-27T16:45:29.000Z
|
import uuid
from vibora import Vibora, TestSuite
from vibora.router import RouterStrategy
from vibora.responses import Response
class RedirectStrategyTestCase(TestSuite):
def setUp(self):
self.app = Vibora(router_strategy=RouterStrategy.REDIRECT)
async def test_missing_slash_expects_redirect(self):
@self.app.route('/asd', methods=['GET'])
async def home():
return Response(b'123')
client = self.app.test_client(follow_redirects=False)
self.assertEqual((await client.request('/asd/')).status_code, 301)
async def test_missing_slash_with_default_post_route_expects_not_found(self):
@self.app.route('/asd', methods=['POST'])
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual((await client.request('/asd/')).status_code, 404)
async def test_wrong_method_expects_405_response(self):
@self.app.route('/asd/', methods=['GET'])
async def home():
return Response(b'')
client = self.app.test_client()
self.assertEqual((await client.request('/asd', method='POST')).status_code, 405)
async def test_additional_slash_expects_redirected(self):
@self.app.route('/asd/', methods=['GET'])
async def home():
return Response(b'')
client = self.app.test_client(follow_redirects=False)
response = await client.request('/asd', method='GET')
self.assertEqual(response.status_code, 301)
self.assertEqual('/asd/', response.headers['location'])
class StrictStrategyTestCase(TestSuite):
def setUp(self):
self.app = Vibora(router_strategy=RouterStrategy.STRICT)
async def test_simple_get_route_expects_found(self):
path = '/' + uuid.uuid4().hex
@self.app.route(path)
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(200, (await client.request(path)).status_code)
async def test_simple_get_route_expects_404(self):
@self.app.route('/test')
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(404, (await client.request('/wrong-path')).status_code)
async def test_route_missing_slash_expects_404(self):
@self.app.route('/test/')
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(404, (await client.request('/test')).status_code)
async def test_route_correct_slash_but_different_method_expects_not_allowed(self):
@self.app.route('/test/', methods=['POST'])
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(405, (await client.request('/test/')).status_code)
async def test_route_with_params_expect_found(self):
@self.app.route('/<name>/')
async def home(name: int):
self.assertEqual(name, 123)
return Response(b'123')
client = self.app.test_client()
self.assertEqual(200, (await client.request('/123/')).status_code)
async def test_route_with_params_expects_not_found(self):
@self.app.route('/<name>')
async def home(name: str):
return Response(name.encode())
client = self.app.test_client()
self.assertEqual(404, (await client.request('/123/')).status_code)
async def test_dynamic_route_expect_found(self):
@self.app.route('/.*/a')
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(200, (await client.request('/123/a')).status_code)
async def test_dynamic_route_expects_not_found(self):
@self.app.route('/.*/a')
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(404, (await client.request('/123/')).status_code)
class CloneStrategyTestCase(TestSuite):
def setUp(self):
self.app = Vibora(router_strategy=RouterStrategy.CLONE)
async def test_simple_get_route_expects_found(self):
@self.app.route('/test')
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(200, (await client.request('/test')).status_code)
async def test_simple_get_route_wrong_method_expects_not_allowed(self):
@self.app.route('/test', methods=['POST'])
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(405, (await client.request('/test')).status_code)
async def test_simple_get_route_wrong_path_expects_not_found(self):
@self.app.route('/test', methods=['POST'])
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(404, (await client.request('/asd')).status_code)
async def test_missing_slash_expects_found(self):
@self.app.route('/test/', methods=['GET'])
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(200, (await client.request('/test')).status_code)
async def test_additional_slash_expects_found(self):
@self.app.route('/test', methods=['GET'])
async def home():
return Response(b'123')
client = self.app.test_client()
self.assertEqual(200, (await client.request('/test/')).status_code)
| 34.219512
| 88
| 0.641839
| 692
| 5,612
| 5.011561
| 0.106936
| 0.074683
| 0.060265
| 0.083333
| 0.848328
| 0.831892
| 0.786621
| 0.767589
| 0.707612
| 0.67474
| 0
| 0.027039
| 0.222381
| 5,612
| 163
| 89
| 34.429448
| 0.767644
| 0
| 0
| 0.546218
| 0
| 0
| 0.047398
| 0
| 0
| 0
| 0
| 0
| 0.159664
| 1
| 0.02521
| false
| 0
| 0.033613
| 0
| 0.226891
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e1de61b5f502afdc039e7db98afaa40491ad5bdb
| 39
|
py
|
Python
|
kerastuner/applications/__init__.py
|
haifeng-jin/kt-legacy
|
15686b5e2d25b7094134d68956b2edce5dffa7a0
|
[
"Apache-2.0"
] | 1
|
2022-03-29T21:49:22.000Z
|
2022-03-29T21:49:22.000Z
|
kerastuner/applications/__init__.py
|
haifeng-jin/kt-legacy
|
15686b5e2d25b7094134d68956b2edce5dffa7a0
|
[
"Apache-2.0"
] | null | null | null |
kerastuner/applications/__init__.py
|
haifeng-jin/kt-legacy
|
15686b5e2d25b7094134d68956b2edce5dffa7a0
|
[
"Apache-2.0"
] | 1
|
2022-02-14T18:57:19.000Z
|
2022-02-14T18:57:19.000Z
|
from keras_tuner.applications import *
| 19.5
| 38
| 0.846154
| 5
| 39
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e1f219de481817814f06b0f511bd28746666a782
| 120
|
py
|
Python
|
articles/admin.py
|
kronok/feincms-articles
|
0577491fdb6f79a0360c5559bea771c5405da046
|
[
"BSD-2-Clause"
] | null | null | null |
articles/admin.py
|
kronok/feincms-articles
|
0577491fdb6f79a0360c5559bea771c5405da046
|
[
"BSD-2-Clause"
] | null | null | null |
articles/admin.py
|
kronok/feincms-articles
|
0577491fdb6f79a0360c5559bea771c5405da046
|
[
"BSD-2-Clause"
] | null | null | null |
from django.contrib import admin
from .models import Article, ArticleAdmin
admin.site.register(Article, ArticleAdmin)
| 20
| 42
| 0.825
| 15
| 120
| 6.6
| 0.666667
| 0.383838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 120
| 6
| 42
| 20
| 0.925234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c005b09311a06dbdbc0c6a997382f7fdb785ab5b
| 21,454
|
py
|
Python
|
pyroomacoustics/windows.py
|
HemaZ/pyroomacoustics
|
c401f829c71ff03a947f68f9b6b2f48346ae84b2
|
[
"MIT"
] | 1
|
2019-08-04T07:34:02.000Z
|
2019-08-04T07:34:02.000Z
|
pyroomacoustics/windows.py
|
HemaZ/pyroomacoustics
|
c401f829c71ff03a947f68f9b6b2f48346ae84b2
|
[
"MIT"
] | null | null | null |
pyroomacoustics/windows.py
|
HemaZ/pyroomacoustics
|
c401f829c71ff03a947f68f9b6b2f48346ae84b2
|
[
"MIT"
] | 1
|
2021-01-14T08:42:47.000Z
|
2021-01-14T08:42:47.000Z
|
# coding=utf-8
#
# MIT License
#
# Window functions Copyright (C) 2015-2019 Taishi Nakashima, Robin Scheibler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
r"""
Window Functions
================
This is a collection of many popular window functions used in signal processing.
A few options are provided to correctly construct the required window function.
The ``flag`` keyword argument can take the following values.
``asymmetric``
This way, many of the functions will sum to one when their left part is added
to their right part. This is useful for overlapped transforms such as the STFT.
``symmetric``
With this flag, the window is perfectly symmetric. This might be more
suitable for analysis tasks.
``mdct``
Available for only some of the windows. The window is modified to satisfy
the perfect reconstruction condition of the MDCT transform.
Often, we would like to get the full window function, but on some occasions, it is useful
to get only the left (or right) part. This can be indicated via the keyword argument
``length`` that can take values ``full`` (default), ``left``, or ``right``.
"""
import numpy as np
from scipy import special
pi = np.pi
# Bartlett window
def bart(N, flag='asymmetric', length='full'):
r'''
The Bartlett window function
.. math::
w[n] = 2 / (M-1) ((M-1)/2 - |n - (M-1)/2|) , n=0,\ldots,N-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used
for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag in ['symmetric', 'mdct']:
t = t / float(N - 1)
else:
t = t / float(N)
w = 2/(N-1) * ((N-1)/2 - np.abs(t - (N-1)/2))
# make the window respect MDCT condition
if flag == 'mdct':
w **= 2
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# Modified Bartlett--Hann window
def bart_hann(N, flag='asymmetric', length='full'):
r'''
The modified Bartlett--Hann window function
.. math::
w[n] = 0.62 - 0.48|(n/M-0.5)| + 0.38 \cos(2\pi(n/M-0.5)),
n=0,\ldots,N-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used
for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag in ['symmetric', 'mdct']:
t = t / float(N - 1)
else:
t = t / float(N)
w = 0.62 - 0.48 * np.abs(t/N - 0.5) + 0.38 * np.cos(2*pi*(t/N - 0.5))
# make the window respect MDCT condition
if flag == 'mdct':
w **= 2
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# Blackman window
def blackman(N, flag='asymmetric', length='full'):
r'''
The Blackman window function
.. math::
w[n] = 0.42 - 0.5\cos(2\pi n/(M-1)) + 0.08\cos(4\pi n/(M-1)),
n = 0, \ldots, M-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used
for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag in ['symmetric', 'mdct']:
t = t / float(N - 1)
else:
t = t / float(N)
w = 0.42 - 0.5*np.cos(2*pi*t/(N-1)) + 0.08*np.cos(4*pi*t/(N-1))
# make the window respect MDCT condition
if flag == 'mdct':
w **= 2
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# Blackman-Harris window
def blackman_harris(N, flag='asymmetric', length='full'):
r'''
The Hann window function
.. math::
w[n] = a_0 - a_1 \cos(2\pi n/M)
+ a_2 \cos(4\pi n/M) + a_3 \cos(6\pi n/M), n=0,\ldots,N-1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used
for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# coefficients
a = np.array([.35875, .48829, .14128, .01168])
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag == 'symmetric':
t = t / float(N - 1)
else:
t = t / float(N)
w = a[0] - a[1]*np.cos(2*pi*t) + a[2]*np.cos(4*pi*t) + a[3]*np.cos(6*pi*t)
return w
# Bohman window function
def bohman(N, flag='asymmetric', length='full'):
r'''
The Bohman window function
.. math::
w[n] = (1-|x|) \cos(\pi |x|) + \pi / |x| \sin(\pi |x|), -1\leq x\leq 1
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used
for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag in ['symmetric', 'mdct']:
t = t / float(N - 1)
else:
t = t / float(N)
x = np.abs(np.linspace(-1, 1, N)[1:-1])
w = (1 - x) * np.cos(pi * x) + 1.0 / pi * np.sin(pi * x)
w = np.r_[0, w, 0]
# make the window respect MDCT condition
if flag == 'mdct':
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# cosine window function
def cosine(N, flag='asymmetric', length='full'):
r'''
The cosine window function
.. math::
w[n] = \cos(\pi (n/M - 0.5))^2
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are used
for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
- *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
:math:`w[n]^2 + w[n+N/2]^2=1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag in ['symmetric', 'mdct']:
t = t / float(N - 1)
else:
t = t / float(N)
w = np.cos(pi * (t - 0.5)) ** 2
# make the window respect MDCT condition
if flag == 'mdct':
w **= 2
d = w[:N / 2] + w[N / 2:]
w[:N / 2] *= 1. / d
w[N / 2:] *= 1. / d
# compute window
return w
# Flattop window
def flattop(N, flag='asymmetric', length='full'):
r'''
The flat top weighted window function
.. math::
w[n] = a_0 - a_1 \cos(2\pi n/M) + a_2 \cos(4\pi n/M)
+ a_3 \cos(6\pi n/M) + a_4 \cos(8\pi n/M), n=0,\ldots,N-1
where
.. math::
a0 = 0.21557895
a1 = 0.41663158
a2 = 0.277263158
a3 = 0.083578947
a4 = 0.006947368
Parameters
----------
N: int
the window length
flag: string, optional
Possible values
- *asymmetric*: asymmetric windows are
used for overlapping transforms (:math:`M=N`)
- *symmetric*: the window is symmetric (:math:`M=N-1`)
length: string, optional
Possible values
- *full*: the full length window is computed
- *right*: the right half of the window is computed
- *left*: the left half of the window is computed
'''
# coefficients
a = np.array([.21557895, .41663158, .277263158, .083578947, .006947368])
# first choose the indexes of points to compute
if length == 'left': # left side of window
t = np.arange(0, N / 2)
elif length == 'right': # right side of window
t = np.arange(N / 2, N)
else: # full window by default
t = np.arange(0, N)
# if asymmetric window, denominator is N, if symmetric it is N-1
if flag == 'symmetric':
t = t / float(N - 1)
else:
t = t / float(N)
w = a[0] - a[1]*np.cos(2*pi*t) + a[2]*np.cos(4*pi*t)\
+ a[3]*np.cos(6*pi*t) + a[4]*np.cos(8*pi*t)
return w
# Gaussian window
def gaussian(N, std, flag='asymmetric', length='full'):
    r'''
    The Gaussian window function
    .. math::
        w[n] = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
    Parameters
    ----------
    N: int
        the window length
    std: float
        the standard deviation
    flag: string, optional
        Kept for API uniformity with the other window functions; the
        Gaussian window is always centered on the middle of the full
        window, so this flag does not change the result.
    length: string, optional
        Possible values
        - *full*: the full length window is computed
        - *right*: the right half of the window is computed
        - *left*: the left half of the window is computed
    '''
    # first choose the indexes of points to compute
    # (previously the selected indexes were ignored and the full window
    # was always returned; the half-window options now work as documented)
    if length == 'left':      # left side of window
        t = np.arange(0, N // 2)
    elif length == 'right':   # right side of window
        t = np.arange(N // 2, N)
    else:                     # full window by default
        t = np.arange(0, N)
    # center the samples on the middle of the full window
    n = t - (N - 1.0) / 2.0
    sig2 = 2 * std**2
    w = np.exp(-n**2 / sig2)
    return w
# hamming window function
def hamming(N, flag='asymmetric', length='full'):
    r'''
    The Hamming window function
    .. math::
        w[n] = 0.54 - 0.46 \cos(2 \pi n / M), n=0,\ldots,N-1
    Parameters
    ----------
    N: int
        the window length
    flag: string, optional
        Possible values
        - *asymmetric*: asymmetric windows are used
          for overlapping transforms (:math:`M=N`)
        - *symmetric*: the window is symmetric (:math:`M=N-1`)
        - *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
          :math:`w[n]^2 + w[n+N/2]^2=1`)
    length: string, optional
        Possible values
        - *full*: the full length window is computed
        - *right*: the right half of the window is computed
        - *left*: the left half of the window is computed
    '''
    # half-window boundary as an integer: N / 2 is a float in Python 3,
    # which breaks both the np.arange bounds and the MDCT slicing below
    h = N // 2
    # first choose the indexes of points to compute
    if length == 'left':     # left side of window
        t = np.arange(0, h)
    elif length == 'right':  # right side of window
        t = np.arange(h, N)
    else:                    # full window by default
        t = np.arange(0, N)
    # if asymmetric window, denominator is N, if symmetric it is N-1
    if flag in ['symmetric', 'mdct']:
        t = t / float(N - 1)
    else:
        t = t / float(N)
    w = 0.54 - 0.46*np.cos(2*pi*t)
    # make the window respect MDCT condition: normalize each pair of
    # samples half a window apart by their sum
    if flag == 'mdct':
        d = w[:h] + w[h:]
        w[:h] *= 1. / d
        w[h:] *= 1. / d
    return w
# hann window function
def hann(N, flag='asymmetric', length='full'):
    r'''
    The Hann window function
    .. math::
        w[n] = 0.5 (1 - \cos(2 \pi n / M)), n=0,\ldots,N-1
    Parameters
    ----------
    N: int
        the window length
    flag: string, optional
        Possible values
        - *asymmetric*: asymmetric windows are used
          for overlapping transforms (:math:`M=N`)
        - *symmetric*: the window is symmetric (:math:`M=N-1`)
        - *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
          :math:`w[n]^2 + w[n+N/2]^2=1`)
    length: string, optional
        Possible values
        - *full*: the full length window is computed
        - *right*: the right half of the window is computed
        - *left*: the left half of the window is computed
    '''
    # half-window boundary as an integer: N / 2 is a float in Python 3,
    # which breaks both the np.arange bounds and the MDCT slicing below
    h = N // 2
    # first choose the indexes of points to compute
    if length == 'left':     # left side of window
        t = np.arange(0, h)
    elif length == 'right':  # right side of window
        t = np.arange(h, N)
    else:                    # full window by default
        t = np.arange(0, N)
    # if asymmetric window, denominator is N, if symmetric it is N-1
    if flag in ['symmetric', 'mdct']:
        t = t / float(N - 1)
    else:
        t = t / float(N)
    w = 0.5 * (1 - np.cos(2 * pi * t))
    # make the window respect MDCT condition: normalize each pair of
    # samples half a window apart by their sum
    if flag == 'mdct':
        d = w[:h] + w[h:]
        w[:h] *= 1. / d
        w[h:] *= 1. / d
    return w
# Kaiser window function
def kaiser(N, beta, flag='asymmetric', length='full'):
    r'''
    The Kaiser window function
    .. math::
        w[n] = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}} \right)/I_0(\beta)
    with
    .. math::
        \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
    where :math:`I_0` is the modified zeroth-order Bessel function.
    Parameters
    ----------
    N: int
        the window length
    beta: float
        Shape parameter, determines trade-off between main-lobe width and
        side lobe level. As beta gets large, the window narrows.
    flag: string, optional
        Possible values
        - *asymmetric*: asymmetric windows are used
          for overlapping transforms (:math:`M=N`)
        - *symmetric*: the window is symmetric (:math:`M=N-1`)
        - *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
          :math:`w[n]^2 + w[n+N/2]^2=1`)
    length: string, optional
        Possible values
        - *full*: the full length window is computed
        - *right*: the right half of the window is computed
        - *left*: the left half of the window is computed
    '''
    # half-window boundary as an integer: N / 2 is a float in Python 3,
    # which breaks both the np.arange bounds and the MDCT slicing below
    h = N // 2
    # first choose the indexes of points to compute
    # (previously the selection was ignored and the full window was
    # always returned; the half-window options now work as documented)
    if length == 'left':     # left side of window
        t = np.arange(0, h)
    elif length == 'right':  # right side of window
        t = np.arange(h, N)
    else:                    # full window by default
        t = np.arange(0, N)
    # center/half-width of the full window
    alpha = (N - 1) / 2.0
    w = (special.i0(beta * np.sqrt(1 - ((t - alpha) / alpha) ** 2.0)) /
         special.i0(beta))
    # make the window respect MDCT condition: normalize each pair of
    # samples half a window apart by their sum
    if flag == 'mdct':
        d = w[:h] + w[h:]
        w[:h] *= 1. / d
        w[h:] *= 1. / d
    return w
# Rectangular window function
def rect(N):
    r'''
    The rectangular window
    .. math::
        w[n] = 1, n=0,\ldots,N-1
    Parameters
    ----------
    N: int
        the window length
    '''
    # every sample has unit weight
    return np.full(N, 1.0)
# triangular window function
def triang(N, flag='asymmetric', length='full'):
    r'''
    The triangular window function
    .. math::
        w[n] = 1 - | 2 n / M - 1 |, n=0,\ldots,N-1
    Parameters
    ----------
    N: int
        the window length
    flag: string, optional
        Possible values
        - *asymmetric*: asymmetric windows are used
          for overlapping transforms (:math:`M=N`)
        - *symmetric*: the window is symmetric (:math:`M=N-1`)
        - *mdct*: impose MDCT condition on the window (:math:`M=N-1` and
          :math:`w[n]^2 + w[n+N/2]^2=1`)
    length: string, optional
        Possible values
        - *full*: the full length window is computed
        - *right*: the right half of the window is computed
        - *left*: the left half of the window is computed
    '''
    # half-window boundary as an integer: N / 2 is a float in Python 3,
    # which breaks both the np.arange bounds and the MDCT slicing below
    h = N // 2
    # first choose the indexes of points to compute
    if length == 'left':     # left side of window
        t = np.arange(0, h)
    elif length == 'right':  # right side of window
        t = np.arange(h, N)
    else:                    # full window by default
        t = np.arange(0, N)
    # if asymmetric window, denominator is N, if symmetric it is N-1
    if flag in ['symmetric', 'mdct']:
        t = t / float(N - 1)
    else:
        t = t / float(N)
    w = 1. - np.abs(2. * t - 1.)
    # make the window respect MDCT condition: normalize each pair of
    # samples half a window apart by their sum
    if flag == 'mdct':
        d = w[:h] + w[h:]
        w[:h] *= 1. / d
        w[h:] *= 1. / d
    return w
| 28.154856
| 89
| 0.552904
| 3,186
| 21,454
| 3.718456
| 0.097301
| 0.013506
| 0.011649
| 0.009116
| 0.751752
| 0.742888
| 0.728117
| 0.708534
| 0.704313
| 0.704313
| 0
| 0.034371
| 0.312436
| 21,454
| 761
| 90
| 28.191853
| 0.768761
| 0.625571
| 0
| 0.764444
| 0
| 0
| 0.067376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057778
| false
| 0
| 0.008889
| 0
| 0.124444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c054b2b885178ee8ac75c1b76197be1c2f169794
| 133
|
py
|
Python
|
sample/tests2.py
|
quantamc/EmokitVisualizer
|
e6e48163eb8029351304849953235a4e50eb17f1
|
[
"MIT"
] | 16
|
2018-06-01T16:16:51.000Z
|
2021-06-08T11:37:58.000Z
|
sample/tests2.py
|
quantamc/EmokitVisualizer
|
e6e48163eb8029351304849953235a4e50eb17f1
|
[
"MIT"
] | 2
|
2019-09-23T11:56:51.000Z
|
2019-10-10T21:12:22.000Z
|
sample/tests2.py
|
quantamc/EmokitVisualizer
|
e6e48163eb8029351304849953235a4e50eb17f1
|
[
"MIT"
] | 5
|
2018-11-24T17:55:48.000Z
|
2021-01-06T20:08:58.000Z
|
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph.examples
pyqtgraph.examples.run()
| 19
| 38
| 0.819549
| 20
| 133
| 5.45
| 0.6
| 0.275229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12782
| 133
| 7
| 39
| 19
| 0.939655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fbeb6dd2af4725bc6b932d0ea30d080003f67f2a
| 2,509
|
py
|
Python
|
django_bootstrap_wysiwyg/tests/widgets.py
|
Prithvi45/django-bootstrap-wysiwyg
|
7ec93c29221207d793070c2956814b36dcc175a5
|
[
"MIT"
] | 9
|
2015-02-03T07:01:38.000Z
|
2017-10-18T09:08:18.000Z
|
django_bootstrap_wysiwyg/tests/widgets.py
|
Prithvi45/django-bootstrap-wysiwyg
|
7ec93c29221207d793070c2956814b36dcc175a5
|
[
"MIT"
] | 4
|
2015-01-06T13:44:59.000Z
|
2020-06-04T19:24:46.000Z
|
django_bootstrap_wysiwyg/tests/widgets.py
|
laplacesdemon/django-bootstrap-wysiwyg
|
7ec93c29221207d793070c2956814b36dcc175a5
|
[
"MIT"
] | 8
|
2015-01-06T13:45:21.000Z
|
2020-11-24T17:32:58.000Z
|
from django.test import TestCase
from bs4 import BeautifulSoup
from django_bootstrap_wysiwyg.widgets import WysiwygInput
class WysiwygInputTests(TestCase):
def test_render_simple(self):
obj = WysiwygInput()
attrs = {"id": "id_message"}
html = obj.render("message", "my value", attrs)
soup = BeautifulSoup(html)
self.assertIn("my value", html)
self.assertIn('class="editor"', html)
message = soup.find(id="id_message")
self.assertEqual(message.attrs, {'id': 'id_message', 'class': ['editor']})
self.assertEqual(message.get_text(), u'\n my value\n')
# all toolbar items should be present by default
toolbar_items = soup.find_all(attrs={"class": "btn-group"})
self.assertEqual(8, len(toolbar_items))
toolbar_items_context = obj.get_context("message", "")
self.assertEqual(9, len(toolbar_items_context['toolbar_items']))
def test_render_with_attrs(self):
obj = WysiwygInput()
attrs = {"class": "my-class", "style": "width:200px", "id": "id_message"}
html = obj.render("message", "my value", attrs)
soup = BeautifulSoup(html)
self.assertIn("my value", html)
self.assertIn('class="my-class editor"', html)
message = soup.find(id="id_message")
self.assertEqual(message.attrs, {'id': 'id_message', 'class': ['my-class', 'editor'], 'style': 'width:200px'})
self.assertEqual(message.get_text(), u'\n my value\n')
# all toolbar items should be present by default
toolbar_items = soup.find_all(attrs={"class": "btn-group"})
self.assertEqual(8, len(toolbar_items))
toolbar_items_context = obj.get_context("message", "")
self.assertEqual(9, len(toolbar_items_context['toolbar_items']))
def test_toolbar_options_font(self):
obj = WysiwygInput(toolbar_items=['fonts'])
attrs = {"id": "id_message"}
html = obj.render("message", "my value", attrs)
soup = BeautifulSoup(html)
toolbar_items = soup.find_all(attrs={"class": "btn-group"})
self.assertEqual(1, len(toolbar_items))
def test_toolbar_options_font_size(self):
obj = WysiwygInput(toolbar_items=['font_size'])
attrs = {"id": "id_message"}
html = obj.render("message", "my value", attrs)
soup = BeautifulSoup(html)
toolbar_items = soup.find_all(attrs={"class": "btn-group"})
self.assertEqual(1, len(toolbar_items))
| 38.6
| 118
| 0.636907
| 305
| 2,509
| 5.072131
| 0.183607
| 0.139625
| 0.056884
| 0.051713
| 0.811248
| 0.77117
| 0.77117
| 0.743374
| 0.743374
| 0.743374
| 0
| 0.006602
| 0.215225
| 2,509
| 64
| 119
| 39.203125
| 0.779076
| 0.037067
| 0
| 0.673913
| 0
| 0
| 0.174886
| 0
| 0
| 0
| 0
| 0
| 0.304348
| 1
| 0.086957
| false
| 0
| 0.065217
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2277d7dbc2ec1bf84f5e93a0a30c3c86fb1be7bc
| 49
|
py
|
Python
|
src/at_serial_can/__init__.py
|
gueei/at_serial_can
|
97d3aee4f833dabd68b619ae3f6a55cf340ad8ad
|
[
"MIT"
] | null | null | null |
src/at_serial_can/__init__.py
|
gueei/at_serial_can
|
97d3aee4f833dabd68b619ae3f6a55cf340ad8ad
|
[
"MIT"
] | null | null | null |
src/at_serial_can/__init__.py
|
gueei/at_serial_can
|
97d3aee4f833dabd68b619ae3f6a55cf340ad8ad
|
[
"MIT"
] | null | null | null |
import can
from .at_serial_can import ATSerialBus
| 24.5
| 38
| 0.877551
| 8
| 49
| 5.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 2
| 38
| 24.5
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f11d90f552d33ac311a232e90faae85783b2935
| 1,217
|
py
|
Python
|
Tests/test_strings.py
|
klassen-software-solutions/pyutil
|
a0ddaa15791537c92b78ceb0120d44a4e907b8b6
|
[
"MIT"
] | null | null | null |
Tests/test_strings.py
|
klassen-software-solutions/pyutil
|
a0ddaa15791537c92b78ceb0120d44a4e907b8b6
|
[
"MIT"
] | 9
|
2020-01-27T17:56:22.000Z
|
2020-02-04T15:35:15.000Z
|
Tests/test_strings.py
|
klassen-software-solutions/pyutil
|
a0ddaa15791537c92b78ceb0120d44a4e907b8b6
|
[
"MIT"
] | 1
|
2022-02-05T09:08:02.000Z
|
2022-02-05T09:08:02.000Z
|
import unittest
import kss.util.strings as strings
class StringsTestCase(unittest.TestCase):
def test_remove_prefix(self):
text = "this is a test"
self.assertEqual(strings.remove_prefix(text, "this "), "is a test")
self.assertEqual(strings.remove_prefix(text, "not"), "this is a test")
self.assertEqual(strings.remove_prefix(text, "THIS"), "this is a test")
self.assertEqual(strings.remove_prefix(text, ""), "this is a test")
with self.assertRaises(TypeError):
strings.remove_prefix(text, None)
with self.assertRaises(AttributeError):
strings.remove_prefix(None, "hi")
def test_remove_suffix(self):
text = "this is a test"
self.assertEqual(strings.remove_suffix(text, " test"), "this is a")
self.assertEqual(strings.remove_suffix(text, "not"), "this is a test")
self.assertEqual(strings.remove_suffix(text, "TEST"), "this is a test")
self.assertEqual(strings.remove_suffix(text, ""), "this is a test")
with self.assertRaises(TypeError):
strings.remove_suffix(text, None)
with self.assertRaises(AttributeError):
strings.remove_suffix(None, "hi")
| 43.464286
| 79
| 0.663106
| 152
| 1,217
| 5.203947
| 0.177632
| 0.197219
| 0.088496
| 0.125158
| 0.806574
| 0.806574
| 0.76359
| 0.76359
| 0.624526
| 0.624526
| 0
| 0
| 0.21364
| 1,217
| 27
| 80
| 45.074074
| 0.826541
| 0
| 0
| 0.26087
| 0
| 0
| 0.129827
| 0
| 0
| 0
| 0
| 0
| 0.521739
| 1
| 0.086957
| false
| 0
| 0.086957
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f36e46db36d754224a4ce602b7f7e7dc2baf898
| 17,680
|
py
|
Python
|
tests/test_pins.py
|
mpunkenhofer/pychess
|
29e48d3ec68ba55e87b8d5a07544d96bc14ab558
|
[
"MIT"
] | null | null | null |
tests/test_pins.py
|
mpunkenhofer/pychess
|
29e48d3ec68ba55e87b8d5a07544d96bc14ab558
|
[
"MIT"
] | null | null | null |
tests/test_pins.py
|
mpunkenhofer/pychess
|
29e48d3ec68ba55e87b8d5a07544d96bc14ab558
|
[
"MIT"
] | null | null | null |
# Mathias Punkenhofer
# code.mpunkenhofer@gmail.com
#
import unittest
import pychess
class PinTests(unittest.TestCase):
def test_diagonally_pinned_w_pawns(self):
board = pychess.board.SetupBoard('b3k3/7b/4n3/3P1P2/2n1K1n1/3P1P2/8/1b5b w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_capture_diagonal_pinner_wP(self):
board = pychess.board.SetupBoard('4k3/7b/2b5/3P1P2/4K3/8/8/8 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['dxc6'])
def test_diagonally_pinned_b_pawns(self):
board = pychess.board.SetupBoard('B3K3/7B/8/3p1p2/2N1k1N1/3p1p2/4N3/1B5B w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.BLACK)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_capture_diagonal_pinner_bP(self):
board = pychess.board.SetupBoard('4k3/3p1p2/6B1/8/B7/8/8/4K3 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.BLACK)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['fxg6'])
def test_diagonally_not_pinned_w_piece(self):
board = pychess.board.SetupBoard('4k3/8/8/b7/7b/2P5/3P1P2/4K3 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['c4', 'd3', 'd4'])
def test_diagonally_not_pinned_b_piece(self):
board = pychess.board.SetupBoard('4k3/3p1p2/2p5/7B/B7/8/8/4K3 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.BLACK)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['d6', 'd5', 'c5'])
def test_diagonally_pinned_w_rooks(self):
board = pychess.board.SetupBoard('b3k3/7b/8/2bR1R2/4Kn2/3R1R2/3b4/1b5b w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_diagonally_pinned_b_rooks(self):
board = pychess.board.SetupBoard('B3K3/7B/8/2Br1r2/4kN2/3r1r2/3B4/1B5B w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.BLACK)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_diagonally_pinned_w_knights(self):
board = pychess.board.SetupBoard('b3k3/7b/8/2bN1Nb1/4K3/3N1N2/3b4/1b5b w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_diagonally_pinned_b_knights(self):
board = pychess.board.SetupBoard('B3K3/7B/8/2Bn1nB1/4k3/3n1n2/3B4/1B5B w - -')
pinned_piece = board.get_knights(pychess.PieceColor.BLACK)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_same_rising_diagonal(self):
same_r1 = pychess.pieces.Piece.same_rising_diagonal((0, 0), (7, 7))
same_r2 = pychess.pieces.Piece.same_rising_diagonal((7, 7), (0, 0))
self.assertTrue(same_r1 == same_r2 and same_r1)
def test_same_falling_diagonal(self):
same_r1 = pychess.pieces.Piece.same_falling_diagonal((0, 7), (7, 0))
same_r2 = pychess.pieces.Piece.same_falling_diagonal((7, 0), (0, 7))
self.assertTrue(same_r1 == same_r2 and same_r1)
def test_diagonally_pinned_w_queens(self):
board = pychess.board.SetupBoard('b3k3/7b/8/3Q1Q2/4KPP1/3QPQ2/4P1N1/1b5b w - -')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qf2', 'Qf1', 'Qg3', 'Qh3', 'Qc2', 'Qc6', 'Qb7', 'Qg6', 'Qxa8', 'Qxb1', 'Qxh7'])
def test_diagonally_pinned_b_queens(self):
board = pychess.board.SetupBoard('B3K3/7B/8/3q1q2/4kpp1/3qpq2/4p1n1/1B5B w - -')
pinned_piece = board.get_queens(pychess.PieceColor.BLACK)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qf2', 'Qf1', 'Qg3', 'Qh3', 'Qc2', 'Qc6', 'Qb7', 'Qg6', 'Qxa8', 'Qxb1', 'Qxh7'])
def test_file_pinned_pawns_one_not_pinned(self):
board = pychess.board.SetupBoard('2r1k3/8/8/2P5/1nKn4/2P5/8/2q5 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['c6'])
def test_file_pinned_pawns_one_not_pinned_on_second_rank(self):
board = pychess.board.SetupBoard('4k1q1/8/8/8/8/5b2/6P1/6K1 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['g3', 'g4'])
def test_file_pinned_pawns_en_passant(self):
board = pychess.board.SetupBoard('4r1k1/ppp2ppp/8/3pP3/8/8/8/4K3 w - d6')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['e6'])
def test_file_pinned_bishops_all_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/5p2/4B3/4K3/4B3/3r4/4r3 w - -')
pinned_piece = board.get_bishops(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_file_pinned_bishops_one_not_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/5p2/4B3/3nKn2/4B3/8/4n3 w - -')
pinned_piece = board.get_bishops(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Bf2', 'Bg1', 'Bd2', 'Bc1', 'Bxd4', 'Bxf4'])
def test_file_pinned_bishops_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('3kq3/8/5p2/4B3/3nKn2/4B3/4N3/4r3 w - -')
pinned_piece = board.get_bishops(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Bf2', 'Bg1', 'Bd2', 'Bc1', 'Bxd4', 'Bxf4'])
def test_file_pinned_knights_all_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/6n1/4N3/4K3/4N3/6n1/4r3 w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_file_pinned_knights_one_not_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/6n1/3PNP2/2P1K1P1/4N3/2P3n1/4n3 w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Nxg2', 'Nd1', 'Nf1'])
def test_file_pinned_knights_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('3kq3/8/6n1/3PNP2/2PPKPP1/4N3/2P1N1n1/4r3 w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Nxg2', 'Nd1', 'Nf1', 'Nc3', 'Ng3', 'Nc1', 'Ng1'])
def test_file_pinned_rooks_all_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/5r2/4R3/4K3/4R3/3r4/4r3 w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Re2', 'Rxe1', 'Re6', 'Re7', 'Rxe8'])
def test_file_pinned_rooks_one_not_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/5r2/4R3/3pKp2/3PR3/3r4/4n3 w - - 0 1')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Re2', 'Rxe1', 'Rf3', 'Rg3', 'Rh3', 'Re6', 'Re7', 'Rxe8'])
def test_file_pinned_rooks_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('3kq3/8/5r2/3nR3/4Kp2/3PR3/3rR3/4n3 w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Rf3', 'Rg3', 'Rh3', 'Re6', 'Re7', 'Rxe8', 'Rxd2', 'Rxe1', 'Rf2', 'Rg2', 'Rh2'])
def test_file_pinned_queens_all_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/5r2/4Q3/4K3/4Q3/3r4/4r3 w - -')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qe2', 'Qxe1', 'Qe6', 'Qe7', 'Qxe8'])
def test_file_pinned_queens_one_not_pinned(self):
board = pychess.board.SetupBoard('3kq3/8/5r2/4Q3/3pKp2/3PQ3/3r4/4n3 w - - 0 1')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qe2', 'Qxe1', 'Qxd2', 'Qf2', 'Qg1', 'Qf3', 'Qg3', 'Qh3', 'Qxd4', 'Qxf4',
'Qe6', 'Qe7', 'Qxe8'])
def test_file_pinned_queens_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('3kq3/8/5r2/4Q3/3pKp2/3PQ3/3rR3/4n3 w - -')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qxd2', 'Qf2', 'Qg1', 'Qf3', 'Qg3', 'Qh3', 'Qxd4', 'Qxf4',
'Qe6', 'Qe7', 'Qxe8'])
def test_rank_pinned_pawns_all_pinned(self):
board = pychess.board.SetupBoard('44k3/8/8/8/6r1/r2PKP1q/8/8 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_rank_pinned_pawns_one_not_pinned(self):
board = pychess.board.SetupBoard('4k3/8/8/8/6r1/r2PKP1b/8/8 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['f4', 'fxg4'])
def test_rank_pinned_pawns_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('4k3/8/8/8/6r1/r2PKPNq/8/8 w - -')
pinned_piece = board.get_pawns(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['f4', 'fxg4'])
def test_rank_pinned_bishops_all_pinned(self):
board = pychess.board.SetupBoard('4k3/8/8/8/6r1/r2BKB1q/8/8 w - -')
pinned_piece = board.get_bishops(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_rank_pinned_bishops_one_not_pinned(self):
board = pychess.board.SetupBoard('4k3/8/8/8/4n1r1/r2BKB1n/8/8 w - -')
pinned_piece = board.get_bishops(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Be2', 'Bd1', 'Bxg4', 'Bxe4', 'Bg2', 'Bh1'])
def test_rank_pinned_bishops_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('4k3/8/8/8/4n1r1/r2BKBNr/8/8 w - -')
pinned_piece = board.get_bishops(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Be2', 'Bd1', 'Bxg4', 'Bxe4', 'Bg2', 'Bh1'])
def test_rank_pinned_knights_all_pinned(self):
board = pychess.board.SetupBoard('7k/8/1P1P1n2/r5P1/r1NKN2q/6P1/1n1P1P2/8 w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertFalse(moves)
def test_rank_pinned_knights_one_not_pinned(self):
board = pychess.board.SetupBoard('7k/8/1P1P1n2/r5P1/n1NKN2q/6P1/1n1P1P2/8 w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Nxb2', 'Ne3', 'Nxa5', 'Ne5', 'Na3'])
def test_rank_pinned_knights_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('7k/8/1P1P1n2/r5P1/nBNKN2q/6P1/1n1P1P2/8 w - -')
pinned_piece = board.get_knights(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Nxb2', 'Ne3', 'Nxa5', 'Ne5', 'Na3'])
def test_rank_pinned_rooks_all_pinned(self):
board = pychess.board.SetupBoard('7k/8/8/2P1n3/r1RKR2q/2n5/4P3/8 w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Rb4', 'Rxa4', 'Rf4', 'Rg4', 'Rxh4'])
def test_rank_pinned_rooks_one_not_pinned(self):
board = pychess.board.SetupBoard('7k/8/8/2P1n3/n1RKR2q/2n5/4P3/8 w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Rb4', 'Rxa4', 'Rf4', 'Rg4', 'Rxh4', 'Rxc3'])
def test_rank_pinned_rooks_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('7k/8/8/2P1n3/r1RKRQ1q/2n5/4P3/8 w - -')
pinned_piece = board.get_rooks(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Rb4', 'Rxa4', 'Rxe5', 'Re3'])
def test_rank_pinned_queens_all_pinned(self):
board = pychess.board.SetupBoard('7k/8/2R1R3/2P1nb2/r1QKQ2q/2nP4/4P3/8 w - -')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qb4', 'Qxa4', 'Qf4', 'Qg4', 'Qxh4'])
def test_rank_pinned_queens_one_not_pinned(self):
board = pychess.board.SetupBoard('7k/8/2R1R3/2P1nb2/n1QKQ2q/2nP4/4P3/8 w - -')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qb4', 'Qxa4', 'Qf4', 'Qg4', 'Qxh4', 'Qd5', 'Qb3', 'Qa2', 'Qb5', 'Qa6', 'Qxc3'])
def test_rank_pinned_queens_one_not_pinned_by_block(self):
board = pychess.board.SetupBoard('7k/8/2R1R3/2P1nb2/qRQKQ2q/2nP4/4P3/8 w - -')
pinned_piece = board.get_queens(pychess.PieceColor.WHITE)
moves = []
for p in pinned_piece:
for m in p.moves():
moves.append(m.to_algebraic())
self.assertCountEqual(moves, ['Qf4', 'Qg4', 'Qxh4', 'Qd5', 'Qb3', 'Qa2', 'Qb5', 'Qa6', 'Qxc3'])
if __name__ == '__main__':
unittest.main()
| 33.295669
| 118
| 0.603281
| 2,324
| 17,680
| 4.399742
| 0.103701
| 0.090367
| 0.065721
| 0.086259
| 0.941614
| 0.912078
| 0.894279
| 0.878924
| 0.849682
| 0.82445
| 0
| 0.062052
| 0.258937
| 17,680
| 530
| 119
| 33.358491
| 0.718364
| 0.002658
| 0
| 0.660969
| 0
| 0
| 0.12417
| 0.076635
| 0
| 0
| 0
| 0
| 0.125356
| 1
| 0.125356
| false
| 0.002849
| 0.005698
| 0
| 0.133903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f4d4388492600c6f60dc76e3a8fea3ae32635c0
| 6,312
|
py
|
Python
|
diffmah/tests/test_halo_population_assembly.py
|
ArgonneCPAC/diffmah
|
867d11def6284b07e58753f0d4590adc21495e3d
|
[
"BSD-3-Clause"
] | 5
|
2021-05-14T10:05:26.000Z
|
2022-01-13T08:56:16.000Z
|
diffmah/tests/test_halo_population_assembly.py
|
ArgonneCPAC/diffmah
|
867d11def6284b07e58753f0d4590adc21495e3d
|
[
"BSD-3-Clause"
] | 1
|
2021-04-21T20:41:27.000Z
|
2021-05-05T15:05:03.000Z
|
diffmah/tests/test_halo_population_assembly.py
|
ArgonneCPAC/diffmah
|
867d11def6284b07e58753f0d4590adc21495e3d
|
[
"BSD-3-Clause"
] | 1
|
2021-05-05T17:29:31.000Z
|
2021-05-05T17:29:31.000Z
|
"""
"""
import os
import numpy as np
from ..halo_population_assembly import _get_bimodal_halo_history
from ..halo_population_assembly import UE_ARR, UL_ARR, LGTC_ARR
from ..tng_pdf_model import DEFAULT_MAH_PDF_PARAMS as TNG_PARAMS
_THIS_DRNAME = os.path.dirname(os.path.abspath(__file__))
DDRN = os.path.join(_THIS_DRNAME, "testing_data")
def test_get_average_halo_histories():
"""Verify that the _get_average_halo_histories returns reasonable arrays."""
tarr = np.linspace(1, 13.8, 25)
lgt_arr = np.log10(tarr)
lgmp_arr = np.array((11.25, 11.75, 12, 12.5, 13, 13.5, 14, 14.5))
_res = _get_bimodal_halo_history(lgt_arr, lgmp_arr, UE_ARR, UL_ARR, LGTC_ARR)
mean_dmhdt, mean_mah, mean_log_mah, variance_dmhdt, variance_mah = _res
mean_log_mahs = np.log10(mean_mah)
# Average halo MAHs should agree at t=today
assert np.allclose(mean_log_mahs[:, -1], lgmp_arr, atol=0.01)
# Average halo MAHs should monotonically increase
assert np.all(np.diff(mean_log_mahs, axis=1) > 0)
# Average halo accretion rates should monotonically increase with present-day mass
assert np.all(np.diff(mean_dmhdt[:, -1]) > 0)
def test_average_halo_histories_agree_with_nbody_simulations():
    """Model predictions should match tabulated N-body targets to within atol=0.1 dex."""
    lgm_strings = (
        "11.50", "11.75", "12.00", "12.25", "12.50", "12.75", "13.00",
        "13.25", "13.50", "13.75", "14.00", "14.25", "14.50",
    )
    lgmp_targets = np.array([float(s) for s in lgm_strings])
    lgt = np.log10(np.loadtxt(os.path.join(DDRN, "nbody_t_target.dat")))

    def _load_stack(pat):
        # Stack the per-mass target files into a single (n_mass, n_time) array
        return np.array(
            [np.loadtxt(os.path.join(DDRN, pat.format(s))) for s in lgm_strings]
        )

    mean_log_mah_targets = _load_stack("mean_log_mah_nbody_logmp_{}.dat")
    var_log_mah_targets = _load_stack("var_log_mah_nbody_logmp_{}.dat")
    mean_dmhdt_targets = _load_stack("mean_dmhdt_nbody_logmp_{}.dat")
    var_dmhdt_targets = _load_stack("var_dmhdt_nbody_logmp_{}.dat")

    _res = _get_bimodal_halo_history(lgt, lgmp_targets, UE_ARR, UL_ARR, LGTC_ARR)
    mean_dmhdt_preds, mean_log_mah_preds = _res[0], _res[2]
    var_dmhdt_preds, var_log_mah_preds = _res[3], _res[4]

    # Each entry: (targets, predictions, compare-in-log10-space?, failure message)
    comparisons = (
        (mean_log_mah_targets, mean_log_mah_preds, False,
         "Inaccurate N-body prediction for <log10(MAH)> at lgmp = {0:.2f}"),
        (mean_dmhdt_targets, mean_dmhdt_preds, True,
         "Inaccurate N-body prediction for <dMh/dt> at lgmp = {0:.2f}"),
        (var_log_mah_targets, var_log_mah_preds, False,
         "Inaccurate N-body prediction for std(log10(MAH)) at lgmp = {0:.2f}"),
        (var_dmhdt_targets, var_dmhdt_preds, True,
         "Inaccurate N-body prediction for std(dMh/dt) at lgmp = {0:.2f}"),
    )
    for targets, preds, in_log, msg in comparisons:
        for im, lgmp in enumerate(lgmp_targets):
            x, y = targets[im, :], preds[im, :]
            if in_log:
                x, y = np.log10(x), np.log10(y)
            assert np.allclose(x, y, atol=0.1), msg.format(lgmp)
def test_average_halo_histories_agree_with_tng():
    """Model predictions with TNG PDF params should match tabulated TNG targets."""
    lgm_strings = (
        "11.50", "11.75", "12.00", "12.25", "12.50",
        "12.75", "13.00", "13.25", "13.50", "13.75",
    )
    lgmp_targets = np.array([float(s) for s in lgm_strings])
    lgt = np.log10(np.loadtxt(os.path.join(DDRN, "tng_t_target.dat")))

    def _load_stack(pat):
        # Stack the per-mass target files into a single (n_mass, n_time) array
        return np.array(
            [np.loadtxt(os.path.join(DDRN, pat.format(s))) for s in lgm_strings]
        )

    mean_log_mah_targets = _load_stack("mean_log_mah_tng_logmp_{}.dat")
    var_log_mah_targets = _load_stack("var_log_mah_tng_logmp_{}.dat")
    mean_dmhdt_targets = _load_stack("mean_dmhdt_tng_logmp_{}.dat")
    var_dmhdt_targets = _load_stack("var_dmhdt_tng_logmp_{}.dat")

    _res = _get_bimodal_halo_history(
        lgt, lgmp_targets, UE_ARR, UL_ARR, LGTC_ARR, **TNG_PARAMS
    )
    mean_dmhdt_preds, mean_log_mah_preds = _res[0], _res[2]
    var_dmhdt_preds, var_log_mah_preds = _res[3], _res[4]

    # Each entry: (targets, predictions, compare-in-log10-space?, atol, message).
    # std(dMh/dt) gets a looser tolerance (0.2) than the other three (0.1).
    comparisons = (
        (mean_log_mah_targets, mean_log_mah_preds, False, 0.1,
         "Inaccurate TNG prediction for <log10(MAH)> at lgmp = {0:.2f}"),
        (mean_dmhdt_targets, mean_dmhdt_preds, True, 0.1,
         "Inaccurate TNG prediction for <dMh/dt> at lgmp = {0:.2f}"),
        (var_log_mah_targets, var_log_mah_preds, False, 0.1,
         "Inaccurate TNG prediction for std(log10(MAH)) at lgmp = {0:.2f}"),
        (var_dmhdt_targets, var_dmhdt_preds, True, 0.2,
         "Inaccurate TNG prediction for std(dMh/dt) at lgmp = {0:.2f}"),
    )
    for targets, preds, in_log, atol, msg in comparisons:
        for im, lgmp in enumerate(lgmp_targets):
            x, y = targets[im, :], preds[im, :]
            if in_log:
                x, y = np.log10(x), np.log10(y)
            assert np.allclose(x, y, atol=atol), msg.format(lgmp)
| 41.801325
| 87
| 0.646388
| 1,010
| 6,312
| 3.778218
| 0.123762
| 0.033019
| 0.028826
| 0.028826
| 0.817086
| 0.796122
| 0.773585
| 0.754717
| 0.732704
| 0.728512
| 0
| 0.040565
| 0.214987
| 6,312
| 150
| 88
| 42.08
| 0.729566
| 0.038498
| 0
| 0.567797
| 0
| 0
| 0.144863
| 0.037661
| 0
| 0
| 0
| 0
| 0.09322
| 1
| 0.025424
| false
| 0
| 0.042373
| 0
| 0.067797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58da6f961d9c9dc7d421b3f5c3e993a42bb668ef
| 28
|
py
|
Python
|
eyepy/quantification/__init__.py
|
yiqian-wang/eyepy
|
0523e8cea78c23a9c1bcf2d5b47a8f0fb59712e5
|
[
"MIT"
] | null | null | null |
eyepy/quantification/__init__.py
|
yiqian-wang/eyepy
|
0523e8cea78c23a9c1bcf2d5b47a8f0fb59712e5
|
[
"MIT"
] | null | null | null |
eyepy/quantification/__init__.py
|
yiqian-wang/eyepy
|
0523e8cea78c23a9c1bcf2d5b47a8f0fb59712e5
|
[
"MIT"
] | null | null | null |
from ._drusen import drusen
| 14
| 27
| 0.821429
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18b0364a5143082d53ebfce9d2f97679a8d2fbbf
| 10,877
|
py
|
Python
|
dwavebinarycsp/factories/constraint/gates.py
|
mcfarljm/dwavebinarycsp
|
383feb01422bc292b869f2994da57b5f475bd32d
|
[
"Apache-2.0"
] | 1
|
2022-02-01T14:40:05.000Z
|
2022-02-01T14:40:05.000Z
|
dwavebinarycsp/factories/constraint/gates.py
|
mcfarljm/dwavebinarycsp
|
383feb01422bc292b869f2994da57b5f475bd32d
|
[
"Apache-2.0"
] | null | null | null |
dwavebinarycsp/factories/constraint/gates.py
|
mcfarljm/dwavebinarycsp
|
383feb01422bc292b869f2994da57b5f475bd32d
|
[
"Apache-2.0"
] | 1
|
2022-02-01T14:40:31.000Z
|
2022-02-01T14:40:31.000Z
|
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
import dimod
from dwavebinarycsp.core.constraint import Constraint
__all__ = ['and_gate',
'or_gate',
'xor_gate',
'halfadder_gate',
'fulladder_gate']
@dimod.decorators.vartype_argument('vartype')
def and_gate(variables, vartype=dimod.BINARY, name='AND'):
    """AND gate.
    Args:
        variables (list): Variable labels for the and gate as `[in1, in2, out]`,
            where `in1, in2` are inputs and `out` the gate's output.
        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
            input values:
            * Vartype.SPIN, 'SPIN', {-1, 1}
            * Vartype.BINARY, 'BINARY', {0, 1}
        name (str, optional, default='AND'): Name for the constraint.
    Returns:
        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are
        assigned values that match the valid states of an AND gate.
    Examples:
        >>> import dwavebinarycsp.factories.constraint.gates as gates
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'], name='AND1'))
        >>> csp.check({'a': 1, 'b': 0, 'c': 0})
        True
    """
    variables = tuple(variables)
    if vartype is dimod.BINARY:
        # Renamed local `configurations` -> `configs` for consistency with the
        # other gate factories in this module (or_gate, xor_gate, adders).
        configs = frozenset([(0, 0, 0),
                             (0, 1, 0),
                             (1, 0, 0),
                             (1, 1, 1)])

        def func(in1, in2, out): return (in1 and in2) == out
    else:
        # SPIN, vartype is checked by the decorator
        configs = frozenset([(-1, -1, -1),
                             (-1, +1, -1),
                             (+1, -1, -1),
                             (+1, +1, +1)])

        def func(in1, in2, out): return ((in1 > 0) and (in2 > 0)) == (out > 0)
    return Constraint(func, configs, variables, vartype=vartype, name=name)
@dimod.decorators.vartype_argument('vartype')
def or_gate(variables, vartype=dimod.BINARY, name='OR'):
    """Build a Constraint encoding a Boolean OR gate.
    Args:
        variables (list): Labels `[in1, in2, out]` where `in1, in2` are the
            gate inputs and `out` is its output.
        vartype (Vartype, optional, default='BINARY'): Variable type; either
            Vartype.SPIN / 'SPIN' / {-1, 1} or Vartype.BINARY / 'BINARY' / {0, 1}.
        name (str, optional, default='OR'): Name for the constraint.
    Returns:
        Constraint(:obj:`.Constraint`): Satisfied exactly by the valid states
        of an OR gate.
    Examples:
        >>> import dwavebinarycsp.factories.constraint.gates as gates
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
        >>> csp.add_constraint(gates.or_gate(['x', 'y', 'z'], {-1,1}, name='OR1'))
        >>> csp.check({'x': 1, 'y': -1, 'z': 1})
        True
    """
    variables = tuple(variables)
    if vartype is dimod.BINARY:
        valid_states = ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 1))

        def func(in1, in2, out):
            return (in1 or in2) == out
    else:
        # SPIN values; the decorator has already validated vartype.
        valid_states = ((-1, -1, -1), (-1, +1, +1), (+1, -1, +1), (+1, +1, +1))

        def func(in1, in2, out):
            return ((in1 > 0) or (in2 > 0)) == (out > 0)
    return Constraint(func, frozenset(valid_states), variables, vartype=vartype, name=name)
@dimod.decorators.vartype_argument('vartype')
def xor_gate(variables, vartype=dimod.BINARY, name='XOR'):
    """Build a Constraint encoding a Boolean XOR gate.
    Args:
        variables (list): Labels `[in1, in2, out]` where `in1, in2` are the
            gate inputs and `out` is its output.
        vartype (Vartype, optional, default='BINARY'): Variable type; either
            Vartype.SPIN / 'SPIN' / {-1, 1} or Vartype.BINARY / 'BINARY' / {0, 1}.
        name (str, optional, default='XOR'): Name for the constraint.
    Returns:
        Constraint(:obj:`.Constraint`): Satisfied exactly by the valid states
        of an XOR gate.
    Examples:
        >>> import dwavebinarycsp.factories.constraint.gates as gates
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(gates.xor_gate(['x', 'y', 'z'], name='XOR1'))
        >>> csp.check({'x': 1, 'y': 1, 'z': 1})
        False
    """
    variables = tuple(variables)
    if vartype is dimod.BINARY:
        valid_states = ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0))

        def func(in1, in2, out):
            return (in1 != in2) == out
    else:
        # SPIN values; the decorator has already validated vartype.
        valid_states = ((-1, -1, -1), (-1, +1, +1), (+1, -1, +1), (+1, +1, -1))

        def func(in1, in2, out):
            return ((in1 > 0) != (in2 > 0)) == (out > 0)
    return Constraint(func, frozenset(valid_states), variables, vartype=vartype, name=name)
@dimod.decorators.vartype_argument('vartype')
def halfadder_gate(variables, vartype=dimod.BINARY, name='HALF_ADDER'):
    """Half adder.
    Args:
        variables (list): Variable labels for the and gate as `[in1, in2, sum, carry]`,
            where `in1, in2` are inputs to be added and `sum` and 'carry' the resultant
            outputs.
        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
            input values:
            * Vartype.SPIN, 'SPIN', {-1, 1}
            * Vartype.BINARY, 'BINARY', {0, 1}
        name (str, optional, default='HALF_ADDER'): Name for the constraint.
    Returns:
        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are
        assigned values that match the valid states of a Boolean half adder.
    Examples:
        >>> import dwavebinarycsp.factories.constraint.gates as gates
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(gates.halfadder_gate(['a', 'b', 'total', 'carry'], name='HA1'))
        >>> csp.check({'a': 1, 'b': 1, 'total': 0, 'carry': 1})
        True
    """
    variables = tuple(variables)
    if vartype is dimod.BINARY:
        configs = frozenset([(0, 0, 0, 0),
                             (0, 1, 1, 0),
                             (1, 0, 1, 0),
                             (1, 1, 0, 1)])
    else:
        # SPIN, vartype is checked by the decorator
        configs = frozenset([(-1, -1, -1, -1),
                             (-1, +1, +1, -1),
                             (+1, -1, +1, -1),
                             (+1, +1, -1, +1)])

    def func(augend, addend, sum_, carry):
        # Count how many inputs are "high"; `> 0` maps both BINARY 1 and SPIN
        # +1 to True, so one function serves both vartypes.
        total = (augend > 0) + (addend > 0)
        if total == 0:
            return (sum_ <= 0) and (carry <= 0)
        elif total == 1:
            return (sum_ > 0) and (carry <= 0)
        elif total == 2:
            return (sum_ <= 0) and (carry > 0)
        else:
            # BUG FIX: error message previously misspelled "received"
            raise ValueError("func received unexpected values")
    return Constraint(func, configs, variables, vartype=vartype, name=name)
@dimod.decorators.vartype_argument('vartype')
def fulladder_gate(variables, vartype=dimod.BINARY, name='FULL_ADDER'):
    """Full adder.
    Args:
        variables (list): Variable labels for the and gate as `[in1, in2, in3, sum, carry]`,
            where `in1, in2, in3` are inputs to be added and `sum` and 'carry' the resultant
            outputs.
        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
            input values:
            * Vartype.SPIN, 'SPIN', {-1, 1}
            * Vartype.BINARY, 'BINARY', {0, 1}
        name (str, optional, default='FULL_ADDER'): Name for the constraint.
    Returns:
        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are
        assigned values that match the valid states of a Boolean full adder.
    Examples:
        >>> import dwavebinarycsp.factories.constraint.gates as gates
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(gates.fulladder_gate(['a', 'b', 'c_in', 'total', 'c_out'], name='FA1'))
        >>> csp.check({'a': 1, 'b': 0, 'c_in': 1, 'total': 0, 'c_out': 1})
        True
    """
    variables = tuple(variables)
    if vartype is dimod.BINARY:
        configs = frozenset([(0, 0, 0, 0, 0),
                             (0, 0, 1, 1, 0),
                             (0, 1, 0, 1, 0),
                             (0, 1, 1, 0, 1),
                             (1, 0, 0, 1, 0),
                             (1, 0, 1, 0, 1),
                             (1, 1, 0, 0, 1),
                             (1, 1, 1, 1, 1)])
    else:
        # SPIN, vartype is checked by the decorator
        configs = frozenset([(-1, -1, -1, -1, -1),
                             (-1, -1, +1, +1, -1),
                             (-1, +1, -1, +1, -1),
                             (-1, +1, +1, -1, +1),
                             (+1, -1, -1, +1, -1),
                             (+1, -1, +1, -1, +1),
                             (+1, +1, -1, -1, +1),
                             (+1, +1, +1, +1, +1)])

    def func(in1, in2, in3, sum_, carry):
        # Count the "high" inputs; `> 0` maps both BINARY 1 and SPIN +1 to
        # True, so one function serves both vartypes.
        total = (in1 > 0) + (in2 > 0) + (in3 > 0)
        if total == 0:
            return (sum_ <= 0) and (carry <= 0)
        elif total == 1:
            return (sum_ > 0) and (carry <= 0)
        elif total == 2:
            return (sum_ <= 0) and (carry > 0)
        elif total == 3:
            return (sum_ > 0) and (carry > 0)
        else:
            # BUG FIX: error message previously misspelled "received"
            raise ValueError("func received unexpected values")
    return Constraint(func, configs, variables, vartype=vartype, name=name)
| 37.122867
| 102
| 0.51448
| 1,267
| 10,877
| 4.380426
| 0.12865
| 0.041802
| 0.05027
| 0.058378
| 0.830991
| 0.819459
| 0.777117
| 0.762883
| 0.753874
| 0.74
| 0
| 0.044931
| 0.337041
| 10,877
| 292
| 103
| 37.25
| 0.724726
| 0.494438
| 0
| 0.495575
| 0
| 0
| 0.034652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115044
| false
| 0
| 0.017699
| 0.053097
| 0.238938
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e195ef092f122ac41723a4e958523064447e868d
| 154
|
py
|
Python
|
aas_core_codegen/csharp/stringification/__init__.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | 5
|
2021-12-29T12:55:34.000Z
|
2022-03-01T17:57:21.000Z
|
aas_core_codegen/csharp/stringification/__init__.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | 10
|
2021-12-29T02:15:55.000Z
|
2022-03-09T11:04:22.000Z
|
aas_core_codegen/csharp/stringification/__init__.py
|
aas-core-works/aas-core-csharp-codegen
|
731f706e2d12bf80722ac55d920fcf5402fb26ef
|
[
"MIT"
] | 2
|
2021-12-29T01:42:12.000Z
|
2022-02-15T13:46:33.000Z
|
"""Generate C# code for de/serialization of enumerations."""
from aas_core_codegen.csharp.stringification import _generate
generate = _generate.generate
| 30.8
| 61
| 0.818182
| 19
| 154
| 6.421053
| 0.789474
| 0.393443
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097403
| 154
| 4
| 62
| 38.5
| 0.877698
| 0.350649
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e19842c2e78a9d27c68857384281d3fb114683e0
| 15,518
|
py
|
Python
|
src/fvf/munge.py
|
NickleDave/active-vision-nengo
|
2736fc5ec8f10d7a55a063963ecd1834ec1d3cd0
|
[
"BSD-3-Clause"
] | 2
|
2019-03-03T23:30:02.000Z
|
2019-07-13T00:29:21.000Z
|
src/fvf/munge.py
|
NickleDave/active-vision-nengo
|
2736fc5ec8f10d7a55a063963ecd1834ec1d3cd0
|
[
"BSD-3-Clause"
] | 1
|
2019-06-10T14:14:56.000Z
|
2019-06-10T14:14:56.000Z
|
src/fvf/munge.py
|
NickleDave/active-vision-nengo
|
2736fc5ec8f10d7a55a063963ecd1834ec1d3cd0
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from distutils.util import strtobool
from typing import NamedTuple
import numpy as np
from scipy import stats
def fixations(results_pkl):
    """munge fixation data from a results.pickle file
    Parameters
    ----------
    results_pkl : str
        path to a results.pickle file
    Returns
    -------
    FixationResults : NamedTuple
    """
    # BUG FIX: the original body was `return fixation_results`, a name that is
    # never defined anywhere in this module, so every call raised NameError.
    # Raise NotImplementedError instead so callers get an explicit signal that
    # this munging step has not been written yet.
    raise NotImplementedError(
        "fixations() is not implemented yet (results_pkl={!r})".format(results_pkl)
    )
class RTResults(NamedTuple):
    """Reaction-time results from an FVF-framework simulation run.

    Fields
    ------
    search_types: tuple
        Unique search types present in the results, as str elements,
        e.g. ('easy', 'medium', 'hard').
    display_sizes: tuple
        Unique display sizes present in the results, as int elements,
        e.g. (6, 12, 18).
    target_present: tuple
        Unique target-present/target-absent conditions, as bool elements,
        e.g. (True, False).
    conditions: list
        Every (search type, display size, target present) permutation that
        occurs in the results; a list of tuples.
    RTs_by_condition: dict
        Maps each condition tuple to a numpy array of all reaction times
        recorded for that condition.
    mean_RTs_by_condition: dict
        Maps each condition tuple to numpy.mean of its reaction times.
    std_RTs_by_condition: dict
        Maps each condition tuple to numpy.std of its reaction times.
    mean_RTs_all_display_sizes: dict
        Maps (search type, is target present) to a numpy array of mean
        reaction times, one element per display size in display_sizes.
    mean_RTs_regress_results: dict
        Linear-regression results for mean reaction time v. display size,
        one entry per key in mean_RTs_all_display_sizes.
    std_RTs_all_display_sizes: dict
        Standard deviations of reaction times per (search type, is target
        present), across all display sizes.
    """
    search_types: tuple
    display_sizes: tuple
    target_present: tuple
    conditions: list
    RTs_by_condition: dict
    mean_RTs_by_condition: dict
    std_RTs_by_condition: dict
    mean_RTs_all_display_sizes: dict
    mean_RTs_regress_results: dict
    std_RTs_all_display_sizes: dict
class LinRegressResults(NamedTuple):
    """Linear-regression results, mirroring scipy.stats.linregress output.

    Fields
    ------
    slope : float
        Slope of the fitted regression line.
    intercept : float
        Intercept of the fitted regression line.
    r_value : float
        Correlation coefficient.
    p_value : float
        Two-sided p-value for the null hypothesis that the slope is zero.
    std_err : float
        Standard error of the estimate.
    """
    slope: float
    intercept: float
    r_value: float
    p_value: float
    std_err: float
def reaction_times(rt_json, responses_json):
    """munge results from a reaction_times.json file into format for plotting
    Parameters
    ----------
    rt_json : str
        path to a reaction_times.json file created by running fvf.main
    responses_json : str
        path to a responses.json file saved created running fvf.main
    Returns
    -------
    reaction_time_results : RTResults
        instance of RTResults NamedTuple; see the RTResults docstring for a
        full description of its fields (search_types, display_sizes,
        target_present, conditions, RTs_by_condition, mean_RTs_by_condition,
        std_RTs_by_condition, mean_RTs_all_display_sizes,
        mean_RTs_regress_results, std_RTs_all_display_sizes).
    """
    def _to_bool(val):
        """Parse a truth-value string exactly like distutils.util.strtobool.

        FIX: inlined because distutils was removed from the standard library
        in Python 3.12; behavior (accepted values and ValueError on anything
        else) matches strtobool.
        """
        lowered = val.lower()
        if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
            return 1
        if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
            return 0
        raise ValueError("invalid truth value %r" % (val,))

    with open(rt_json) as fp:
        RTs = json.load(fp)
    with open(responses_json) as fp:
        responses = json.load(fp)
    search_types = []
    display_sizes = []
    target_present = []
    conditions = []
    RTs_by_condition = {}
    mean_RTs_by_condition = {}
    std_RTs_by_condition = {}
    for key, val in RTs.items():
        # convert text key ("search_type, display_size, target_present")
        # back into Python types
        split_key = key.split(',')
        search_type = split_key[0]
        display_size = int(split_key[1])
        is_target_present = bool(_to_bool(split_key[2].strip()))
        # add to conditions that will be returned
        search_types.append(search_type)
        display_sizes.append(display_size)
        target_present.append(is_target_present)
        tup_key = tuple([search_type, display_size, is_target_present])
        conditions.append(tup_key)
        rt_arr = np.asarray(val)
        RTs_by_condition[tup_key] = rt_arr
        # keep only correct trials, as in Young Hulleman 2013; a trial is
        # "correct" when the response matches whether the target was present
        response_arr = np.asarray(responses[key])
        RTs_to_use = np.equal(response_arr, is_target_present)
        mean_RTs_by_condition[tup_key] = np.mean(rt_arr[RTs_to_use])
        std_RTs_by_condition[tup_key] = np.std(rt_arr[RTs_to_use])
    search_types = tuple((set(search_types)))
    display_sizes = tuple(
        sorted(  # sorted, so display_sizes is in ascending numerical order
            set(display_sizes)
        ))
    target_present = tuple(set(target_present))
    mean_RTs_all_display_sizes = {}
    mean_RTs_regress_results = {}
    std_RTs_all_display_sizes = {}
    # collapse per-condition stats into per-(search type, target present)
    # arrays across display sizes, and regress mean RT on display size
    for search_type in search_types:
        for is_target_present in target_present:
            mean_RT_vals = []
            std_RT_vals = []
            for display_size in display_sizes:
                condition_tup = tuple([search_type, display_size, is_target_present])
                mean_val = mean_RTs_by_condition[condition_tup]
                mean_RT_vals.append(mean_val)
                std_val = std_RTs_by_condition[condition_tup]
                std_RT_vals.append(std_val)
            key = tuple([search_type, is_target_present])
            mean_RT_vals = np.asarray(mean_RT_vals)
            mean_RTs_all_display_sizes[key] = mean_RT_vals
            slope, intercept, r_value, p_value, std_err = stats.linregress(display_sizes,
                                                                           mean_RT_vals)
            regress_result = LinRegressResults(slope, intercept, r_value, p_value, std_err)
            mean_RTs_regress_results[key] = regress_result
            std_RT_vals = np.asarray(std_RT_vals)
            std_RTs_all_display_sizes[key] = std_RT_vals
    return RTResults(search_types,
                     display_sizes,
                     target_present,
                     conditions,
                     RTs_by_condition,
                     mean_RTs_by_condition,
                     std_RTs_by_condition,
                     mean_RTs_all_display_sizes,
                     mean_RTs_regress_results,
                     std_RTs_all_display_sizes)
class NumFixationsResults(NamedTuple):
    """Number-of-fixations results from an FVF-framework simulation run.

    Fields
    ------
    search_types: tuple
        Unique search types present in the results, as str elements,
        e.g. ('easy', 'medium', 'hard').
    display_sizes: tuple
        Unique display sizes present in the results, as int elements,
        e.g. (6, 12, 18).
    target_present: tuple
        Unique target-present/target-absent conditions, as bool elements,
        e.g. (True, False).
    conditions: list
        Every (search type, display size, target present) permutation that
        occurs in the results; a list of tuples.
    num_fixations_by_condition: dict
        Maps each condition tuple to a numpy array of fixation counts for
        every trial in that condition.
    mean_num_fixations_by_condition: dict
        Maps each condition tuple to numpy.mean of its fixation counts.
    std_num_fixations_by_condition: dict
        Maps each condition tuple to numpy.std of its fixation counts.
    mean_num_fixations_all_display_sizes: dict
        Maps (search type, is target present) to a numpy array of mean
        fixation counts, one element per display size in display_sizes.
    """
    search_types: tuple
    display_sizes: tuple
    target_present: tuple
    conditions: list
    num_fixations_by_condition: dict
    mean_num_fixations_by_condition: dict
    std_num_fixations_by_condition: dict
    mean_num_fixations_all_display_sizes: dict
def num_fixations(nf_json):
    """munge results from a num_fixations.json file into format for plotting
    Parameters
    ----------
    nf_json : str
        path to a num_fixations.json file created by running fvf.main
    Returns
    -------
    num_fixations_results : NumFixationsResults
        instance of NumFixationsResults NamedTuple; see the
        NumFixationsResults docstring for a full description of its fields
        (search_types, display_sizes, target_present, conditions,
        num_fixations_by_condition, mean_num_fixations_by_condition,
        std_num_fixations_by_condition,
        mean_num_fixations_all_display_sizes).
        DOC FIX: previously documented as "RTResults"; this function returns
        NumFixationsResults.
    """
    def _to_bool(val):
        """Parse a truth-value string exactly like distutils.util.strtobool.

        FIX: inlined because distutils was removed from the standard library
        in Python 3.12; behavior (accepted values and ValueError on anything
        else) matches strtobool.
        """
        lowered = val.lower()
        if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
            return 1
        if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
            return 0
        raise ValueError("invalid truth value %r" % (val,))

    with open(nf_json) as fp:
        num_fix = json.load(fp)
    search_types = []
    display_sizes = []
    target_present = []
    conditions = []
    num_fixations_by_condition = {}
    mean_num_fixations_by_condition = {}
    std_num_fixations_by_condition = {}
    for key, val in num_fix.items():
        # convert text key ("search_type, display_size, target_present")
        # back into Python types
        split_key = key.split(',')
        search_type = split_key[0]
        display_size = int(split_key[1])
        is_target_present = bool(_to_bool(split_key[2].strip()))
        # add to conditions that will be returned
        search_types.append(search_type)
        display_sizes.append(display_size)
        target_present.append(is_target_present)
        tup_key = tuple([search_type, display_size, is_target_present])
        conditions.append(tup_key)
        nf_arr = np.asarray(val)
        num_fixations_by_condition[tup_key] = nf_arr
        mean_num_fixations_by_condition[tup_key] = np.mean(nf_arr)
        std_num_fixations_by_condition[tup_key] = np.std(nf_arr)
    search_types = tuple((set(search_types)))
    display_sizes = tuple(
        sorted(  # sorted, so display_sizes is in ascending numerical order
            set(display_sizes)
        ))
    target_present = tuple(set(target_present))
    mean_num_fixations_all_display_sizes = {}
    # collapse per-condition means into per-(search type, target present)
    # arrays across display sizes
    for search_type in search_types:
        for is_target_present in target_present:
            mean_num_fixations_vals = []
            for display_size in display_sizes:
                condition_tup = tuple([search_type, display_size, is_target_present])
                mean_val = mean_num_fixations_by_condition[condition_tup]
                mean_num_fixations_vals.append(mean_val)
            key = tuple([search_type, is_target_present])
            mean_num_fixations_vals = np.asarray(mean_num_fixations_vals)
            mean_num_fixations_all_display_sizes[key] = mean_num_fixations_vals
    return NumFixationsResults(search_types,
                               display_sizes,
                               target_present,
                               conditions,
                               num_fixations_by_condition,
                               mean_num_fixations_by_condition,
                               std_num_fixations_by_condition,
                               mean_num_fixations_all_display_sizes)
| 41.940541
| 100
| 0.653306
| 1,992
| 15,518
| 4.860944
| 0.093876
| 0.068161
| 0.046473
| 0.045131
| 0.829082
| 0.791594
| 0.762574
| 0.741402
| 0.725911
| 0.717649
| 0
| 0.002722
| 0.289793
| 15,518
| 369
| 101
| 42.054201
| 0.875873
| 0.55368
| 0
| 0.418919
| 0
| 0
| 0.000322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02027
| false
| 0
| 0.033784
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e198cf29776bba46ec3604c8b93e350037e09a29
| 20
|
py
|
Python
|
src/pyfunctrack/trackers/__init__.py
|
jamphan/pyfunctrack
|
30170d808fe2a643e94a04f8d7ccf5bf732dd2f2
|
[
"MIT"
] | 3
|
2020-10-26T14:08:24.000Z
|
2020-10-28T11:34:29.000Z
|
src/pyfunctrack/trackers/__init__.py
|
jamphan/pyfunctrack
|
30170d808fe2a643e94a04f8d7ccf5bf732dd2f2
|
[
"MIT"
] | 8
|
2020-10-27T22:16:46.000Z
|
2020-12-13T20:42:32.000Z
|
src/pyfunctrack/trackers/__init__.py
|
jamphan/pyfunctrack
|
30170d808fe2a643e94a04f8d7ccf5bf732dd2f2
|
[
"MIT"
] | 2
|
2020-10-28T13:36:48.000Z
|
2020-10-29T17:56:17.000Z
|
from . import logger
| 20
| 20
| 0.8
| 3
| 20
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bef7c25b7061414f0dd461e749607304f13283a7
| 56,341
|
py
|
Python
|
backend/corpora/common/utils/ontology_mapping.py
|
chanzuckerberg/dcp-prototype
|
24d2323ba5ae1482395da35ea11c42708e3a52ce
|
[
"MIT"
] | 2
|
2020-02-07T18:12:12.000Z
|
2020-02-11T14:59:03.000Z
|
backend/corpora/common/utils/ontology_mapping.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 173
|
2020-01-29T17:48:02.000Z
|
2020-03-20T02:52:58.000Z
|
backend/corpora/common/utils/ontology_mapping.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 1
|
2020-03-20T17:06:54.000Z
|
2020-03-20T17:06:54.000Z
|
# Generated by https://github.com/chanzuckerberg/single-cell-curation/blob/main/notebooks/compute_ancestor_mapping.ipynb
ontology_mapping = {
"HsapDv:0000000": ["HsapDv:0000000"],
"HsapDv:0000001": ["HsapDv:0000001"],
"HsapDv:0000002": ["HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000045": ["HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000003": ["HsapDv:0000003", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000004": ["HsapDv:0000004", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000005": ["HsapDv:0000005", "HsapDv:0000004", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000006": ["HsapDv:0000006", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000007": ["HsapDv:0000007", "HsapDv:0000006", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000205": [
"HsapDv:0000205",
"HsapDv:0000005",
"HsapDv:0000004",
"HsapDv:0000002",
"HsapDv:0000045",
"HsapDv:0000001",
],
"HsapDv:0000008": ["HsapDv:0000008", "HsapDv:0000006", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000009": ["HsapDv:0000009", "HsapDv:0000006", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000010": ["HsapDv:0000010", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000011": ["HsapDv:0000011", "HsapDv:0000010", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000033": [
"HsapDv:0000033",
"HsapDv:0000009",
"HsapDv:0000006",
"HsapDv:0000002",
"HsapDv:0000045",
"HsapDv:0000001",
],
"HsapDv:0000012": ["HsapDv:0000012", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000013": ["HsapDv:0000013", "HsapDv:0000012", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000035": [
"HsapDv:0000035",
"HsapDv:0000011",
"HsapDv:0000010",
"HsapDv:0000002",
"HsapDv:0000045",
"HsapDv:0000001",
],
"HsapDv:0000014": ["HsapDv:0000014", "HsapDv:0000012", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000015": ["HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000016": ["HsapDv:0000016", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000017": ["HsapDv:0000017", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000018": ["HsapDv:0000018", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000019": ["HsapDv:0000019", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000020": ["HsapDv:0000020", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000021": ["HsapDv:0000021", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000022": ["HsapDv:0000022", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000023": ["HsapDv:0000023", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000024": ["HsapDv:0000024", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000025": ["HsapDv:0000025", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000026": ["HsapDv:0000026", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000027": ["HsapDv:0000027", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000028": ["HsapDv:0000028", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000029": ["HsapDv:0000029", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000030": ["HsapDv:0000030", "HsapDv:0000015", "HsapDv:0000002", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000031": [
"HsapDv:0000031",
"HsapDv:0000009",
"HsapDv:0000006",
"HsapDv:0000002",
"HsapDv:0000045",
"HsapDv:0000001",
],
"HsapDv:0000032": [
"HsapDv:0000032",
"HsapDv:0000009",
"HsapDv:0000006",
"HsapDv:0000002",
"HsapDv:0000045",
"HsapDv:0000001",
],
"HsapDv:0000034": [
"HsapDv:0000034",
"HsapDv:0000011",
"HsapDv:0000010",
"HsapDv:0000002",
"HsapDv:0000045",
"HsapDv:0000001",
],
"HsapDv:0000037": ["HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000046": ["HsapDv:0000046", "HsapDv:0000197", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000197": ["HsapDv:0000197", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000047": ["HsapDv:0000047", "HsapDv:0000197", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000048": ["HsapDv:0000048", "HsapDv:0000197", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000049": ["HsapDv:0000049", "HsapDv:0000198", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000198": ["HsapDv:0000198", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000050": ["HsapDv:0000050", "HsapDv:0000198", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000051": ["HsapDv:0000051", "HsapDv:0000198", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000052": ["HsapDv:0000052", "HsapDv:0000198", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000053": ["HsapDv:0000053", "HsapDv:0000199", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000199": ["HsapDv:0000199", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000054": ["HsapDv:0000054", "HsapDv:0000199", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000055": ["HsapDv:0000055", "HsapDv:0000199", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000056": ["HsapDv:0000056", "HsapDv:0000199", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000057": ["HsapDv:0000057", "HsapDv:0000200", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000200": ["HsapDv:0000200", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000058": ["HsapDv:0000058", "HsapDv:0000200", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000059": ["HsapDv:0000059", "HsapDv:0000200", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000060": ["HsapDv:0000060", "HsapDv:0000200", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000061": ["HsapDv:0000061", "HsapDv:0000200", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000062": ["HsapDv:0000062", "HsapDv:0000201", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000201": ["HsapDv:0000201", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000063": ["HsapDv:0000063", "HsapDv:0000201", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000064": ["HsapDv:0000064", "HsapDv:0000201", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000065": ["HsapDv:0000065", "HsapDv:0000201", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000066": ["HsapDv:0000066", "HsapDv:0000202", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000202": ["HsapDv:0000202", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000067": ["HsapDv:0000067", "HsapDv:0000202", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000068": ["HsapDv:0000068", "HsapDv:0000202", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000069": ["HsapDv:0000069", "HsapDv:0000202", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000070": ["HsapDv:0000070", "HsapDv:0000202", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000071": ["HsapDv:0000071", "HsapDv:0000203", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000203": ["HsapDv:0000203", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000072": ["HsapDv:0000072", "HsapDv:0000203", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000073": ["HsapDv:0000073", "HsapDv:0000203", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000074": ["HsapDv:0000074", "HsapDv:0000203", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000075": ["HsapDv:0000075", "HsapDv:0000203", "HsapDv:0000037", "HsapDv:0000045", "HsapDv:0000001"],
"HsapDv:0000080": ["HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000081": ["HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000083": ["HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000082": ["HsapDv:0000082", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000084": ["HsapDv:0000084", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000085": ["HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000086": ["HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000204": ["HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000087": ["HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000088": ["HsapDv:0000088", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000089": ["HsapDv:0000089", "HsapDv:0000088", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000090": ["HsapDv:0000090", "HsapDv:0000088", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000091": ["HsapDv:0000091", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000092": ["HsapDv:0000092", "HsapDv:0000091", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000093": ["HsapDv:0000093", "HsapDv:0000091", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000094": [
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000095": [
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000173": [
"HsapDv:0000173",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000096": ["HsapDv:0000096", "HsapDv:0000084", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000097": ["HsapDv:0000097", "HsapDv:0000084", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000098": ["HsapDv:0000098", "HsapDv:0000084", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000099": ["HsapDv:0000099", "HsapDv:0000084", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000100": ["HsapDv:0000100", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000101": ["HsapDv:0000101", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000102": ["HsapDv:0000102", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000103": ["HsapDv:0000103", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000104": ["HsapDv:0000104", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000105": ["HsapDv:0000105", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000106": ["HsapDv:0000106", "HsapDv:0000085", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000107": ["HsapDv:0000107", "HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000108": ["HsapDv:0000108", "HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000109": ["HsapDv:0000109", "HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000110": ["HsapDv:0000110", "HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000111": ["HsapDv:0000111", "HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000112": ["HsapDv:0000112", "HsapDv:0000086", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000113": [
"HsapDv:0000113",
"HsapDv:0000089",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000114": [
"HsapDv:0000114",
"HsapDv:0000089",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000115": [
"HsapDv:0000115",
"HsapDv:0000089",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000116": [
"HsapDv:0000116",
"HsapDv:0000089",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000117": [
"HsapDv:0000117",
"HsapDv:0000089",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000118": [
"HsapDv:0000118",
"HsapDv:0000089",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000119": [
"HsapDv:0000119",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000120": [
"HsapDv:0000120",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000121": [
"HsapDv:0000121",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000122": [
"HsapDv:0000122",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000123": [
"HsapDv:0000123",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000124": [
"HsapDv:0000124",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000125": [
"HsapDv:0000125",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000126": [
"HsapDv:0000126",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000127": [
"HsapDv:0000127",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000128": [
"HsapDv:0000128",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000129": [
"HsapDv:0000129",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000130": [
"HsapDv:0000130",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000131": [
"HsapDv:0000131",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000132": [
"HsapDv:0000132",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000133": [
"HsapDv:0000133",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000134": [
"HsapDv:0000134",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000135": [
"HsapDv:0000135",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000136": [
"HsapDv:0000136",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000137": [
"HsapDv:0000137",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000138": [
"HsapDv:0000138",
"HsapDv:0000090",
"HsapDv:0000088",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000139": [
"HsapDv:0000139",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000140": [
"HsapDv:0000140",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000141": [
"HsapDv:0000141",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000142": [
"HsapDv:0000142",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000143": [
"HsapDv:0000143",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000144": [
"HsapDv:0000144",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000145": [
"HsapDv:0000145",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000146": [
"HsapDv:0000146",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000147": [
"HsapDv:0000147",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000148": [
"HsapDv:0000148",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000149": [
"HsapDv:0000149",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000150": [
"HsapDv:0000150",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000151": [
"HsapDv:0000151",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000152": [
"HsapDv:0000152",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000153": [
"HsapDv:0000153",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000154": [
"HsapDv:0000154",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000155": [
"HsapDv:0000155",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000156": [
"HsapDv:0000156",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000157": [
"HsapDv:0000157",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000158": [
"HsapDv:0000158",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000159": [
"HsapDv:0000159",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000160": [
"HsapDv:0000160",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000161": [
"HsapDv:0000161",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000162": [
"HsapDv:0000162",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000163": [
"HsapDv:0000163",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000164": [
"HsapDv:0000164",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000165": [
"HsapDv:0000165",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000166": [
"HsapDv:0000166",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000167": [
"HsapDv:0000167",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000168": [
"HsapDv:0000168",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000169": [
"HsapDv:0000169",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000170": [
"HsapDv:0000170",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000171": [
"HsapDv:0000171",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000172": [
"HsapDv:0000172",
"HsapDv:0000094",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000174": ["HsapDv:0000174", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000256": ["HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000175": ["HsapDv:0000175", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000176": ["HsapDv:0000176", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000177": ["HsapDv:0000177", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000178": ["HsapDv:0000178", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000179": ["HsapDv:0000179", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000180": ["HsapDv:0000180", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000181": ["HsapDv:0000181", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000182": ["HsapDv:0000182", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000183": ["HsapDv:0000183", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000184": ["HsapDv:0000184", "HsapDv:0000256", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000185": ["HsapDv:0000185", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000246": ["HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000186": ["HsapDv:0000186", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000187": ["HsapDv:0000187", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000188": ["HsapDv:0000188", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000189": ["HsapDv:0000189", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000190": ["HsapDv:0000190", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000191": ["HsapDv:0000191", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000192": ["HsapDv:0000192", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000193": ["HsapDv:0000193", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000194": ["HsapDv:0000194", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000195": ["HsapDv:0000195", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000196": ["HsapDv:0000196", "HsapDv:0000246", "HsapDv:0000083", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000206": [
"HsapDv:0000206",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000207": [
"HsapDv:0000207",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000208": [
"HsapDv:0000208",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000209": [
"HsapDv:0000209",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000210": [
"HsapDv:0000210",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000211": [
"HsapDv:0000211",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000212": [
"HsapDv:0000212",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000213": [
"HsapDv:0000213",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000214": [
"HsapDv:0000214",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000215": [
"HsapDv:0000215",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000216": [
"HsapDv:0000216",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000217": [
"HsapDv:0000217",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000218": [
"HsapDv:0000218",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000219": [
"HsapDv:0000219",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000220": [
"HsapDv:0000220",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000221": [
"HsapDv:0000221",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000222": [
"HsapDv:0000222",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000223": [
"HsapDv:0000223",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000224": [
"HsapDv:0000224",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000225": [
"HsapDv:0000225",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000228": [
"HsapDv:0000228",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000247": [
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000229": [
"HsapDv:0000229",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000230": [
"HsapDv:0000230",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000231": [
"HsapDv:0000231",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000232": [
"HsapDv:0000232",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000233": [
"HsapDv:0000233",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000234": [
"HsapDv:0000234",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000235": ["HsapDv:0000235", "HsapDv:0000081", "HsapDv:0000080", "HsapDv:0000001"],
"HsapDv:0000236": ["HsapDv:0000236"],
"HsapDv:0000237": ["HsapDv:0000237", "HsapDv:0000088", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000238": ["HsapDv:0000238", "HsapDv:0000088", "HsapDv:0000087", "HsapDv:0000204", "HsapDv:0000001"],
"HsapDv:0000239": ["HsapDv:0000239"],
"HsapDv:0000240": [
"HsapDv:0000240",
"HsapDv:0000092",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000241": ["HsapDv:0000241"],
"HsapDv:0000242": [
"HsapDv:0000242",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000243": [
"HsapDv:0000243",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000244": [
"HsapDv:0000244",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000245": [
"HsapDv:0000245",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000248": [
"HsapDv:0000248",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000249": [
"HsapDv:0000249",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000250": [
"HsapDv:0000250",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000251": [
"HsapDv:0000251",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000252": [
"HsapDv:0000252",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000253": [
"HsapDv:0000253",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000254": [
"HsapDv:0000254",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"HsapDv:0000255": [
"HsapDv:0000255",
"HsapDv:0000247",
"HsapDv:0000095",
"HsapDv:0000093",
"HsapDv:0000091",
"HsapDv:0000087",
"HsapDv:0000204",
"HsapDv:0000001",
],
"MmusDv:0000000": ["MmusDv:0000000"],
"MmusDv:0000001": ["MmusDv:0000001"],
"MmusDv:0000002": ["MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000042": ["MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000003": ["MmusDv:0000003", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000004": ["MmusDv:0000004", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000005": ["MmusDv:0000005", "MmusDv:0000004", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000006": ["MmusDv:0000006", "MmusDv:0000004", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000007": ["MmusDv:0000007", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000008": ["MmusDv:0000008", "MmusDv:0000007", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000009": ["MmusDv:0000009", "MmusDv:0000007", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000010": ["MmusDv:0000010", "MmusDv:0000007", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000011": ["MmusDv:0000011", "MmusDv:0000007", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000012": ["MmusDv:0000012", "MmusDv:0000007", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000013": ["MmusDv:0000013", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000014": ["MmusDv:0000014", "MmusDv:0000013", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000015": ["MmusDv:0000015", "MmusDv:0000013", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000016": ["MmusDv:0000016"],
"MmusDv:0000017": ["MmusDv:0000017", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000018": ["MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000095": ["MmusDv:0000095", "MmusDv:0000017", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000019": ["MmusDv:0000019", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000020": ["MmusDv:0000020", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000021": ["MmusDv:0000021", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000022": ["MmusDv:0000022", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000023": ["MmusDv:0000023", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000024": ["MmusDv:0000024", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000025": ["MmusDv:0000025", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000026": ["MmusDv:0000026", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000027": ["MmusDv:0000027", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000028": ["MmusDv:0000028", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000029": ["MmusDv:0000029", "MmusDv:0000018", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000030": ["MmusDv:0000030"],
"MmusDv:0000031": ["MmusDv:0000031", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000032": ["MmusDv:0000032", "MmusDv:0000031", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000033": ["MmusDv:0000033", "MmusDv:0000031", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000034": ["MmusDv:0000034", "MmusDv:0000031", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000035": ["MmusDv:0000035", "MmusDv:0000031", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000036": ["MmusDv:0000036", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000092": ["MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000037": ["MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000038": ["MmusDv:0000038", "MmusDv:0000043", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000043": ["MmusDv:0000043", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000096": ["MmusDv:0000096", "MmusDv:0000043", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000039": ["MmusDv:0000039"],
"MmusDv:0000040": ["MmusDv:0000040"],
"MmusDv:0000041": ["MmusDv:0000041", "MmusDv:0000001"],
"MmusDv:0000044": ["MmusDv:0000044", "MmusDv:0000096", "MmusDv:0000043", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000045": ["MmusDv:0000045", "MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000112": ["MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000113": ["MmusDv:0000113", "MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000046": ["MmusDv:0000046", "MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000047": ["MmusDv:0000047", "MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000048": ["MmusDv:0000048", "MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000049": ["MmusDv:0000049", "MmusDv:0000112", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000050": [
"MmusDv:0000050",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000061": ["MmusDv:0000061", "MmusDv:0000110", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000051": [
"MmusDv:0000051",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000052": [
"MmusDv:0000052",
"MmusDv:0000062",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000062": [
"MmusDv:0000062",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000053": [
"MmusDv:0000053",
"MmusDv:0000062",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000054": [
"MmusDv:0000054",
"MmusDv:0000062",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000055": [
"MmusDv:0000055",
"MmusDv:0000062",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000056": [
"MmusDv:0000056",
"MmusDv:0000063",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000063": [
"MmusDv:0000063",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000057": [
"MmusDv:0000057",
"MmusDv:0000063",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000058": [
"MmusDv:0000058",
"MmusDv:0000063",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000059": [
"MmusDv:0000059",
"MmusDv:0000063",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000110": ["MmusDv:0000110", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000064": [
"MmusDv:0000064",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000065": [
"MmusDv:0000065",
"MmusDv:0000064",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000066": [
"MmusDv:0000066",
"MmusDv:0000064",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000067": [
"MmusDv:0000067",
"MmusDv:0000064",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000068": [
"MmusDv:0000068",
"MmusDv:0000064",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000069": [
"MmusDv:0000069",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000070": [
"MmusDv:0000070",
"MmusDv:0000064",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000071": [
"MmusDv:0000071",
"MmusDv:0000069",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000072": [
"MmusDv:0000072",
"MmusDv:0000069",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000073": [
"MmusDv:0000073",
"MmusDv:0000069",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000074": [
"MmusDv:0000074",
"MmusDv:0000069",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000075": [
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000097": ["MmusDv:0000097", "MmusDv:0000110", "MmusDv:0000037", "MmusDv:0000092", "MmusDv:0000001"],
"MmusDv:0000076": [
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000077": [
"MmusDv:0000077",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000078": [
"MmusDv:0000078",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000079": [
"MmusDv:0000079",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000080": [
"MmusDv:0000080",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000081": [
"MmusDv:0000081",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000082": [
"MmusDv:0000082",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000083": [
"MmusDv:0000083",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000084": [
"MmusDv:0000084",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000085": [
"MmusDv:0000085",
"MmusDv:0000075",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000086": [
"MmusDv:0000086",
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000087": [
"MmusDv:0000087",
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000088": [
"MmusDv:0000088",
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000089": [
"MmusDv:0000089",
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000090": [
"MmusDv:0000090",
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000091": [
"MmusDv:0000091",
"MmusDv:0000076",
"MmusDv:0000097",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000093": ["MmusDv:0000093", "MmusDv:0000017", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000111": ["MmusDv:0000111", "MmusDv:0000017", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000094": ["MmusDv:0000094", "MmusDv:0000017", "MmusDv:0000002", "MmusDv:0000042", "MmusDv:0000001"],
"MmusDv:0000098": [
"MmusDv:0000098",
"MmusDv:0000077",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000099": [
"MmusDv:0000099",
"MmusDv:0000077",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000100": [
"MmusDv:0000100",
"MmusDv:0000077",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000101": [
"MmusDv:0000101",
"MmusDv:0000077",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000102": [
"MmusDv:0000102",
"MmusDv:0000077",
"MmusDv:0000061",
"MmusDv:0000110",
"MmusDv:0000037",
"MmusDv:0000092",
"MmusDv:0000001",
],
"MmusDv:0000103": [
"MmusDv:0000103",
"MmusDv:0000014",
"MmusDv:0000013",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"MmusDv:0000104": [
"MmusDv:0000104",
"MmusDv:0000014",
"MmusDv:0000013",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"MmusDv:0000105": [
"MmusDv:0000105",
"MmusDv:0000015",
"MmusDv:0000013",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"MmusDv:0000106": [
"MmusDv:0000106",
"MmusDv:0000015",
"MmusDv:0000013",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"MmusDv:0000107": [
"MmusDv:0000107",
"MmusDv:0000015",
"MmusDv:0000013",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"MmusDv:0000108": [
"MmusDv:0000108",
"MmusDv:0000019",
"MmusDv:0000018",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"MmusDv:0000109": [
"MmusDv:0000109",
"MmusDv:0000019",
"MmusDv:0000018",
"MmusDv:0000002",
"MmusDv:0000042",
"MmusDv:0000001",
],
"UBERON:0007236": ["UBERON:0007236", "UBERON:0000107", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0000106": ["UBERON:0000106", "UBERON:0000068", "UBERON:0000104", "UBERON:0000104"],
"UBERON:0014859": ["UBERON:0014859"],
"UBERON:0008264": ["UBERON:0008264"],
"UBERON:0007233": ["UBERON:0007233", "UBERON:0000107", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0000112": ["UBERON:0000112", "UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:8000003": ["UBERON:8000003"],
"UBERON:0014857": ["UBERON:0014857", "UBERON:0018378"],
"UBERON:0009849": ["UBERON:0009849"],
"UBERON:0034920": ["UBERON:0034920", "UBERON:0018685", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0000069": ["UBERON:0000069", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0000109": ["UBERON:0000109", "UBERON:0000068", "UBERON:0000104"],
"UBERON:8000001": ["UBERON:8000001"],
"UBERON:0000068": ["UBERON:0000068", "UBERON:0000104"],
"UBERON:0018685": ["UBERON:0018685", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0000107": ["UBERON:0000107", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0007222": ["UBERON:0007222", "UBERON:0000113", "UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0000092": ["UBERON:0000092", "UBERON:0000104"],
"UBERON:0018378": ["UBERON:0018378"],
"UBERON:0014864": ["UBERON:0014864"],
"UBERON:0004730": ["UBERON:0004730"],
"UBERON:0000111": ["UBERON:0000111", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0007220": ["UBERON:0007220", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0014405": ["UBERON:0014405"],
"UBERON:0014862": ["UBERON:0014862", "UBERON:0000069", "UBERON:0000092", "UBERON:0000104"],
"UBERON:8000000": ["UBERON:8000000"],
"UBERON:0000071": ["UBERON:0000071", "UBERON:0000104"],
"UBERON:0014860": ["UBERON:0014860", "UBERON:0018378"],
"UBERON:0012101": ["UBERON:0012101"],
"UBERON:0000113": ["UBERON:0000113", "UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0014858": ["UBERON:0014858", "UBERON:0018378"],
"UBERON:0007232": ["UBERON:0007232", "UBERON:0000107", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0000070": ["UBERON:0000070", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0000110": ["UBERON:0000110", "UBERON:0000068", "UBERON:0000104"],
"UBERON:8000002": ["UBERON:8000002"],
"UBERON:0014856": ["UBERON:0014856", "UBERON:0000069", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0004728": ["UBERON:0004728"],
"UBERON:0034919": ["UBERON:0034919", "UBERON:0000112", "UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0000108": ["UBERON:0000108", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0000066": ["UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0004707": ["UBERON:0004707", "UBERON:0000111", "UBERON:0000068", "UBERON:0000104"],
"UBERON:0000105": ["UBERON:0000105"],
"UBERON:0018241": ["UBERON:0018241", "UBERON:0000113", "UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0007221": ["UBERON:0007221", "UBERON:0000112", "UBERON:0000066", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0014406": ["UBERON:0014406", "UBERON:0018378"],
"UBERON:0014863": ["UBERON:0014863", "UBERON:0000069", "UBERON:0000092", "UBERON:0000104"],
"UBERON:0004729": ["UBERON:0004729"],
"UBERON:0014861": ["UBERON:0014861", "UBERON:0018378"],
}
| 34.249848
| 120
| 0.566728
| 4,912
| 56,341
| 6.499796
| 0.062704
| 0.097723
| 0.142231
| 0.098537
| 0.762771
| 0.747737
| 0.747737
| 0.695587
| 0.695587
| 0.432173
| 0
| 0.401101
| 0.242026
| 56,341
| 1,644
| 121
| 34.270681
| 0.346517
| 0.002094
| 0
| 0.658953
| 1
| 0
| 0.609345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
83324b3ff73837c9ee3759756fa81b0a8eccddc4
| 29
|
py
|
Python
|
app/__init__.py
|
wmcgee3/garden-brew-backend
|
37972b8335deb88b7ab9683fdccd0fc3d0e310c6
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
wmcgee3/garden-brew-backend
|
37972b8335deb88b7ab9683fdccd0fc3d0e310c6
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
wmcgee3/garden-brew-backend
|
37972b8335deb88b7ab9683fdccd0fc3d0e310c6
|
[
"MIT"
] | null | null | null |
from .main import app as app
| 14.5
| 28
| 0.758621
| 6
| 29
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 29
| 1
| 29
| 29
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
36d38f5babfe135a1e0170ea890135c4c74be0c4
| 2,777
|
py
|
Python
|
src/resize_img_script.py
|
arthurlirui/refsepECCV2020
|
3a58d5ff2c908f6d30c1c1d9278d4b335fd65f42
|
[
"BSD-3-Clause"
] | 2
|
2021-04-26T21:04:49.000Z
|
2021-07-06T07:17:31.000Z
|
src/resize_img_script.py
|
arthurlirui/Multi-bouncePolarizationState
|
3a58d5ff2c908f6d30c1c1d9278d4b335fd65f42
|
[
"BSD-3-Clause"
] | null | null | null |
src/resize_img_script.py
|
arthurlirui/Multi-bouncePolarizationState
|
3a58d5ff2c908f6d30c1c1d9278d4b335fd65f42
|
[
"BSD-3-Clause"
] | 1
|
2022-03-04T08:45:27.000Z
|
2022-03-04T08:45:27.000Z
|
import torchvision.models
import torch.nn as nn
from torchvision.transforms import ToTensor, ToPILImage
import torch
from PIL import Image
import os
import torch.optim as optim
import random
import torchvision.utils as vutils
import torch.nn.functional as F
from torchvision.transforms.functional import center_crop
if __name__ == '__main__':
    # Average the four polarization-angle captures (0/45/90/135 degrees) of one
    # scene into a single total-intensity image and write it next to the inputs.
    # Fix vs. original: the `if False:` branch was a byte-identical dead copy of
    # the `if True:` branch, and the `glob` import was unused — both removed.
    path = '/home/lir0b/Code/TransparenceDetection/draft_eccv/figure/ablation/row1'
    import numpy as np
    import cv2

    name_fmt = 'LUCID_PHX050S-Q_190100163__20200225214309862_image0_{}d.png'
    captures = []
    for angle in (0, 45, 90, 135):
        img = Image.open(os.path.join(path, name_fmt.format(angle)))
        # Crop every capture to the same centered 1024x1024 window so they align.
        img = center_crop(img, (1024, 1024))
        captures.append(np.asarray(img).astype(float))
    # Mean of the four polarized measurements gives the total intensity image.
    img_tot = 0.25 * (captures[0] + captures[1] + captures[2] + captures[3])
    # Same destination as the original hard-coded path: <path>/tot.png
    cv2.imwrite(os.path.join(path, 'tot.png'), img_tot)
| 44.079365
| 110
| 0.683831
| 380
| 2,777
| 4.802632
| 0.213158
| 0.054795
| 0.054795
| 0.076712
| 0.730411
| 0.730411
| 0.730411
| 0.730411
| 0.730411
| 0.696986
| 0
| 0.179124
| 0.185812
| 2,777
| 63
| 111
| 44.079365
| 0.628041
| 0.099748
| 0
| 0.608696
| 0
| 0
| 0.283193
| 0.279984
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.304348
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3d1aa3ea7efe9dd6a6c1cb2f1c327a20ff2029ee
| 118
|
py
|
Python
|
etudiant/admin.py
|
sandratraJovanie/torolalagna
|
5984b2ef0ff1537ae7ce2385306783ae7a1c15e0
|
[
"Apache-2.0"
] | null | null | null |
etudiant/admin.py
|
sandratraJovanie/torolalagna
|
5984b2ef0ff1537ae7ce2385306783ae7a1c15e0
|
[
"Apache-2.0"
] | null | null | null |
etudiant/admin.py
|
sandratraJovanie/torolalagna
|
5984b2ef0ff1537ae7ce2385306783ae7a1c15e0
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import *
# Expose the student and relation models in the Django admin site.
for model in (etudiants, relation):
    admin.site.register(model)
| 16.857143
| 32
| 0.805085
| 16
| 118
| 5.9375
| 0.625
| 0.231579
| 0.357895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 118
| 6
| 33
| 19.666667
| 0.896226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3d2e47c799ec38a6f239e86c9e790c7938db75f6
| 38
|
py
|
Python
|
segmentation/ptsemseg/metrics/__init__.py
|
GT-RIPL/UNO-IC
|
6a95f2c6bc52ad80bfb1da53fd046a3d4db310d0
|
[
"MIT"
] | 24
|
2020-11-11T03:49:50.000Z
|
2022-03-21T04:23:32.000Z
|
segmentation/ptsemseg/metrics/__init__.py
|
GT-RIPL/UNO-IC
|
6a95f2c6bc52ad80bfb1da53fd046a3d4db310d0
|
[
"MIT"
] | 1
|
2021-07-15T02:46:34.000Z
|
2021-07-15T02:46:34.000Z
|
segmentation/ptsemseg/metrics/__init__.py
|
GT-RIPL/UNO-IC
|
6a95f2c6bc52ad80bfb1da53fd046a3d4db310d0
|
[
"MIT"
] | 2
|
2021-02-04T01:28:19.000Z
|
2021-02-25T09:20:27.000Z
|
from ptsemseg.metrics.metrics import *
| 38
| 38
| 0.842105
| 5
| 38
| 6.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d5d9ae86591db9d1b2917601695f1127d7691d2
| 485
|
py
|
Python
|
lib/dom/__init__.py
|
zepheira/amara
|
d3ffe07d6e2266b34d72b012a82d572c8edbf1e7
|
[
"Apache-2.0"
] | 6
|
2015-01-30T03:50:36.000Z
|
2022-03-20T16:09:58.000Z
|
lib/dom/__init__.py
|
zepheira/amara
|
d3ffe07d6e2266b34d72b012a82d572c8edbf1e7
|
[
"Apache-2.0"
] | 2
|
2015-02-04T17:18:47.000Z
|
2019-09-27T23:39:52.000Z
|
lib/dom/__init__.py
|
zepheira/amara
|
d3ffe07d6e2266b34d72b012a82d572c8edbf1e7
|
[
"Apache-2.0"
] | 6
|
2015-02-04T16:16:18.000Z
|
2019-10-30T20:07:48.000Z
|
########################################################################
# amara/dom/__init__.py
"""
Old school W3C DOM...mostly
"""
import nodes
def parse(obj, uri=None, entity_factory=None, standalone=False, validate=False):
    """Parse *obj* into an amara tree, building DOM nodes by default.

    When no *entity_factory* is supplied, fall back to nodes.Document so the
    resulting tree exposes the W3C-DOM-style API of this package.
    """
    from amara import tree
    factory = entity_factory if entity_factory else nodes.Document
    return tree.parse(obj, uri, entity_factory=factory, standalone=standalone, validate=validate)
#FIXME: Use proper L10N (gettext)
def _(t):
    """Identity translation stub; returns *t* unchanged until gettext is wired in."""
    return t
| 24.25
| 104
| 0.620619
| 57
| 485
| 5.105263
| 0.561404
| 0.223368
| 0.075601
| 0.178694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.140206
| 485
| 19
| 105
| 25.526316
| 0.690647
| 0.169072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0
| 1
| 0.285714
| false
| 0
| 0.285714
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e9f8b22e6ebf5265be6723fff4df9000b2efa347
| 120
|
py
|
Python
|
Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx001.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx001.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx001.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
# Read an integer from the user and print its successor and predecessor.
# (Prompt and output messages are in Portuguese: "Digite um número" = "enter a
# number"; "sucessor"/"antecessor" = successor/predecessor.)
n = int(input('Digite um número '))
print('Seu sucessor é {}'.format(n + 1))
print('Seu antecessor é {}'.format(n - 1))
| 30
| 42
| 0.625
| 20
| 120
| 3.75
| 0.65
| 0.213333
| 0.213333
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 0.15
| 120
| 4
| 42
| 30
| 0.715686
| 0
| 0
| 0
| 0
| 0
| 0.438017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
18375dcff2c6dce1604d11cb29e9a7b72c5840f1
| 89
|
py
|
Python
|
ghri/commands/__init__.py
|
shawalli/ghri
|
0e6e908b60b98afa795b9ac169353dc9bcd2625f
|
[
"MIT"
] | 1
|
2018-11-19T21:01:39.000Z
|
2018-11-19T21:01:39.000Z
|
ghri/commands/__init__.py
|
shawalli/ghri
|
0e6e908b60b98afa795b9ac169353dc9bcd2625f
|
[
"MIT"
] | null | null | null |
ghri/commands/__init__.py
|
shawalli/ghri
|
0e6e908b60b98afa795b9ac169353dc9bcd2625f
|
[
"MIT"
] | null | null | null |
from ghri.commands.list import list_releases
from ghri.commands.show import show_release
| 29.666667
| 44
| 0.865169
| 14
| 89
| 5.357143
| 0.571429
| 0.213333
| 0.426667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089888
| 89
| 2
| 45
| 44.5
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1838c7cfd499a62d29e952afac3004f1dc7c40d7
| 31
|
py
|
Python
|
Round1/string_analysis/__init__.py
|
NavneelSinghal/HCLHackIITK
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
[
"MIT"
] | null | null | null |
Round1/string_analysis/__init__.py
|
NavneelSinghal/HCLHackIITK
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
[
"MIT"
] | null | null | null |
Round1/string_analysis/__init__.py
|
NavneelSinghal/HCLHackIITK
|
91ceb865d1ff7c1ff109fbbbcfda8005d3b9cf93
|
[
"MIT"
] | null | null | null |
from .model import StringModel
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18453b8cb41459974a4317c20eabd6878b810ca9
| 199
|
py
|
Python
|
LeetCode/python3/398.py
|
ZintrulCre/LeetCode_Archiver
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 279
|
2019-02-19T16:00:32.000Z
|
2022-03-23T12:16:30.000Z
|
LeetCode/python3/398.py
|
ZintrulCre/LeetCode_Archiver
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 2
|
2019-03-31T08:03:06.000Z
|
2021-03-07T04:54:32.000Z
|
LeetCode/python3/398.py
|
ZintrulCre/LeetCode_Crawler
|
de23e16ead29336b5ee7aa1898a392a5d6463d27
|
[
"MIT"
] | 12
|
2019-01-29T11:45:32.000Z
|
2019-02-04T16:31:46.000Z
|
class Solution(object):
    """LeetCode 398 (Random Pick Index): pick an index of *target* uniformly."""

    def __init__(self, nums):
        # Keep a reference to the list; each pick() scans it linearly.
        self.nums = nums

    def pick(self, target):
        """Return a uniformly random index i with self.nums[i] == target.

        Raises IndexError (from random.choice on an empty list) when *target*
        does not occur in nums.
        """
        # Fix: the original snippet used `random` without importing it,
        # which raises NameError at runtime.
        import random
        return random.choice([k for k, v in enumerate(self.nums) if v == target])
| 24.875
| 81
| 0.59799
| 28
| 199
| 4.107143
| 0.642857
| 0.208696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.281407
| 199
| 8
| 81
| 24.875
| 0.804196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
43f75b367d002498265e8e32d3ca6b1c5d62bf95
| 39
|
py
|
Python
|
commons/utils/loggers/__init__.py
|
ham2qur/paper_monolithic_microservices
|
0442dabe9e05d92b176257c111002ff688c4b3cb
|
[
"MIT"
] | 17
|
2018-08-07T03:59:19.000Z
|
2020-12-03T14:28:46.000Z
|
commons/utils/loggers/__init__.py
|
ham2qur/paper_monolithic_microservices
|
0442dabe9e05d92b176257c111002ff688c4b3cb
|
[
"MIT"
] | 3
|
2020-06-05T18:24:14.000Z
|
2021-06-10T20:28:20.000Z
|
commons/utils/loggers/__init__.py
|
shreybatra/Blog-O-Mania-Backend
|
8847e7b9c29c402b30d439294fc9deaf7005d0ce
|
[
"MIT"
] | 3
|
2018-08-11T18:17:24.000Z
|
2020-04-28T06:56:05.000Z
|
from .error_logger import error_logger
| 19.5
| 38
| 0.871795
| 6
| 39
| 5.333333
| 0.666667
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a1020689953e81b3abea85f0a014e83e2528f727
| 4,008
|
py
|
Python
|
sql_modul_alchemy.py
|
SerjShepelevich/18--
|
442f9817648a2525da451511dfa36a5efdc3efc1
|
[
"MIT"
] | null | null | null |
sql_modul_alchemy.py
|
SerjShepelevich/18--
|
442f9817648a2525da451511dfa36a5efdc3efc1
|
[
"MIT"
] | null | null | null |
sql_modul_alchemy.py
|
SerjShepelevich/18--
|
442f9817648a2525da451511dfa36a5efdc3efc1
|
[
"MIT"
] | null | null | null |
class Sql_modul_alchemy():
    # Minimal SQLAlchemy/SQLite helper for one table of job-salary records.
    # Schema: name TEXT, mid_salary INT, max_salary INT, min_salary INT, common_skills TEXT'
    # NOTE(review): this relies on the pre-2.0 classical mapper()/clear_mappers()
    # API, which was removed in SQLAlchemy 2.0 — presumably pinned to 1.x; confirm.
    def __init__(self, db_name, **kwargs):
        # db_name: filesystem path of the SQLite database file.
        # **kwargs are accepted but ignored.
        self.db_name = db_name
    def create_db(self):
        """Create the SQLite file and its single 'table' table (idempotent DDL)."""
        from sqlalchemy import create_engine, MetaData, Table
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
        engine = create_engine(f'sqlite:///{self.db_name}', echo = False)
        Base = declarative_base()
        # Local declarative class used only to emit the CREATE TABLE DDL below;
        # it shadows the module-level Record class inside this method.
        class Record(Base):
            from sqlalchemy import Column, Integer, String, Float
            __tablename__ = 'table'
            # Autoincrementing integer primary key assigned by SQLite.
            id = Column(Integer, primary_key = True)
            name = Column(String)
            mid_salary = Column(Float)
            max_salary = Column(Integer)
            min_salary = Column(Integer)
            common_skills = Column(String)
            def __init__(self, name, mid_salary, max_salary, min_salary, common_skills):
                self.name = name
                self.mid_salary = mid_salary
                self.max_salary = max_salary
                self.min_salary = min_salary
                self.common_skills = common_skills
            def __str__(self):
                # Comma-separated dump of all columns, id first.
                return f'{self.id}, {self.name}, {self.mid_salary}, {self.max_salary}, {self.min_salary}, {self.common_skills}'
        #Record(Base)
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind = engine)
        session = Session()
        # No rows are inserted here; the commit just closes the empty transaction.
        session.commit()
        clear_mappers()
    def insert_record(self, data):
        """Insert one row; *data* is a 5-sequence: (name, mid, max, min, skills)."""
        from sqlalchemy import create_engine, MetaData, Table
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
        engine = create_engine(f'sqlite:///{self.db_name}', echo = False)
        metadata = MetaData(engine)
        # Reflect the existing 'table' definition from the database file.
        meta_param = Table('table',metadata,autoload = True)
        clear_mappers()
        # Classical mapping of the MODULE-LEVEL Record class (defined below this
        # class) onto the reflected table — requires create_db() to have run first.
        mapper(Record, meta_param)
        Session = sessionmaker(bind = engine)
        session = Session()
        session.add(Record(data[0], data[1], data[2], data[3], data[4]))
        session.commit()
        clear_mappers()
    def loadSession(self):
        """Return an open Session with the module-level Record mapped, for queries."""
        from sqlalchemy import create_engine, MetaData, Table
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
        engine = create_engine(f'sqlite:///{self.db_name}', echo = False)
        clear_mappers()
        metadata = MetaData(engine)
        meta_param = Table('table', metadata, autoload = True)
        mapper(Record, meta_param)
        Session = sessionmaker(bind = engine)
        session = Session()
        # Caller is responsible for closing the returned session.
        return session
class Record(object):
    # Plain (non-declarative) record class that insert_record()/loadSession()
    # map imperatively onto the reflected 'table' table via the legacy mapper()
    # API. NOTE(review): since this class is not declarative, the Column
    # attributes below are presumably ignored by mapper(), which takes the
    # column definitions from the reflected Table instead — confirm.
    from sqlalchemy import Column, Integer, String, Float
    __tablename__ = 'table'
    # Autoincrementing integer primary key assigned by SQLite.
    id = Column(Integer, primary_key = True)
    name = Column(String)
    mid_salary = Column(Float)
    max_salary = Column(Integer)
    min_salary = Column(Integer)
    common_skills = Column(String)
    def __init__(self, name, mid_salary, max_salary, min_salary, common_skills):
        # id is not set here; the database assigns it on insert.
        self.name = name
        self.mid_salary = mid_salary
        self.max_salary = max_salary
        self.min_salary = min_salary
        self.common_skills = common_skills
    def __str__(self):
        # Comma-separated dump of all columns, id first.
        return f'{self.id}, {self.name}, {self.mid_salary}, {self.max_salary}, {self.min_salary}, {self.common_skills}'
    def convert(self):
        # Return the column values as a list in schema order (id first).
        return [self.id, self.name, self.mid_salary, self.max_salary, self.min_salary, self.common_skills]
# data = ('python', 3454656 / 345, 150000, 65000, '<sdfswerweve', )
#
# Sql_modul_alchemy('alchemy.sqlite').create_db()
# Sql_modul_alchemy('alchemy.sqlite').insert_record(data)
# session = Sql_modul_alchemy('alchemy.sqlite').loadSession()
# records = session.query(Record).all()
#
# # for record in records:
# # print(record)
# rec = records[len(records)-1]
# print(rec.convert()[1])
| 38.538462
| 127
| 0.648703
| 475
| 4,008
| 5.225263
| 0.168421
| 0.060435
| 0.020145
| 0.034247
| 0.791297
| 0.737712
| 0.737712
| 0.714746
| 0.714746
| 0.714746
| 0
| 0.009259
| 0.245509
| 4,008
| 104
| 128
| 38.538462
| 0.811508
| 0.113523
| 0
| 0.815789
| 0
| 0.026316
| 0.083098
| 0.02035
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.144737
| 0.039474
| 0.447368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a18b8abdb18dffc8c50774fd239ee58f76dc7f7a
| 2,546
|
py
|
Python
|
test/programytest/storage/stores/sql/store/test_rdfs.py
|
motazsaad/fit-bot-fb-clt
|
580477aa1ec91855b621d9ae276f2705962f6a87
|
[
"MIT"
] | 5
|
2018-08-21T00:13:45.000Z
|
2018-09-01T20:00:55.000Z
|
test/programytest/storage/stores/sql/store/test_rdfs.py
|
motazsaad/fit-bot-fb-clt
|
580477aa1ec91855b621d9ae276f2705962f6a87
|
[
"MIT"
] | 1
|
2018-09-12T18:30:17.000Z
|
2018-09-12T18:30:17.000Z
|
test/programytest/storage/stores/sql/store/test_rdfs.py
|
motazsaad/fit-bot-fb-clt
|
580477aa1ec91855b621d9ae276f2705962f6a87
|
[
"MIT"
] | 5
|
2018-08-21T00:08:36.000Z
|
2018-09-23T06:11:04.000Z
|
import unittest
from programytest.storage.asserts.store.assert_rdfs import RDFStoreAsserts
from programy.storage.stores.sql.store.rdfs import SQLRDFsStore
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
import programytest.storage.engines as Engines
class SQLRDFsStoreTests(RDFStoreAsserts):
    """Tests for the SQL-backed RDFs store.

    Every test builds a fresh SQL storage engine from a default
    configuration and delegates the real assertions to the shared
    RDFStoreAsserts helpers. The previously duplicated four-line setup is
    factored into _initialised_store().
    """

    def _initialised_store(self):
        """Return an SQLRDFsStore bound to a freshly initialised engine."""
        config = SQLStorageConfiguration()
        engine = SQLStorageEngine(config)
        engine.initialise()
        return SQLRDFsStore(engine)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_initialise(self):
        # Built inline (not via the helper) so the engine stays in scope
        # for the binding assertion below.
        config = SQLStorageConfiguration()
        engine = SQLStorageEngine(config)
        engine.initialise()
        store = SQLRDFsStore(engine)
        self.assertEqual(store.storage_engine, engine)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_rdf_storage(self):
        self.assert_rdf_storage(self._initialised_store())

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_upload_from_text(self):
        self.assert_upload_from_text(self._initialised_store())

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_upload_from_text_file(self):
        self.assert_upload_from_text_file(self._initialised_store())

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_upload_text_files_from_directory_no_subdir(self):
        self.assert_upload_text_files_from_directory_no_subdir(self._initialised_store())

    @unittest.skip("CSV not supported yet")
    def test_upload_from_csv_file(self):
        self.assert_upload_from_csv_file(self._initialised_store())

    @unittest.skip("CSV not supported yet")
    def test_upload_csv_files_from_directory_with_subdir(self):
        self.assert_upload_csv_files_from_directory_with_subdir(self._initialised_store())
| 33.946667
| 74
| 0.727023
| 272
| 2,546
| 6.577206
| 0.165441
| 0.055897
| 0.129122
| 0.152599
| 0.812186
| 0.765232
| 0.765232
| 0.703745
| 0.703745
| 0.703745
| 0
| 0
| 0.195601
| 2,546
| 74
| 75
| 34.405405
| 0.873535
| 0
| 0
| 0.625
| 0
| 0
| 0.016496
| 0
| 0
| 0
| 0
| 0
| 0.160714
| 1
| 0.125
| false
| 0
| 0.107143
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a1cd708163db9363862e1a10c5506331eb44cf08
| 33,444
|
py
|
Python
|
1.experimental_data/IMAS_data/TCV2IMAS_forward_field.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 1
|
2021-12-13T11:52:39.000Z
|
2021-12-13T11:52:39.000Z
|
1.experimental_data/IMAS_data/TCV2IMAS_forward_field.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 2
|
2021-12-18T17:18:52.000Z
|
2022-01-26T09:23:23.000Z
|
1.experimental_data/IMAS_data/TCV2IMAS_forward_field.py
|
dsoliveir/TCV-X21
|
784c55adb33417e21a6736e2504a3895a9348dbe
|
[
"CC-BY-4.0"
] | 2
|
2021-12-13T12:56:09.000Z
|
2022-01-25T20:30:28.000Z
|
import imas
from imas import imasdef
import numpy as np
from netCDF4 import Dataset
# Mapping the TCV-X21 dataset to IMAS format
# Author : F. Imbeaux, 2021
# General description of the dataset --> dataset_description IDS, occurrence 0
# Diagnostic LFS-LP --> langmuir_probes IDS, occurrence 0
# Diagnostic HFS-LP --> langmuir_probes IDS, occurrence 1
# Diagnostic LFS-IR --> camera_ir IDS, occurrence 0
# Diagnostic FHRP --> langmuir_probes IDS, occurrence 2
# Diagnostic RDPA --> langmuir_probes IDS, occurrence 3
# Diagnostic TS --> thomson_scattering IDS, occurrence 0
# Open the original netCDF files
tcv_data = Dataset("./TCV_forward_field.nc")
# ne = tcv_data['LFS-LP/observables/density']
# print(ne['value'][:])
# print(ne['Rsep_omp'][:]) # Caution, Rsep units are cm !
# plt.errorbar(jsat_lfs['Rsep_omp'][:], jsat_lfs['value'][:], jsat_lfs['error'][:])
# Create the output file
pulse = 10
run = 0
# HDF5-backed IMAS data entry; create() makes/overwrites the pulse file.
imas_entry = imas.DBEntry(imasdef.HDF5_BACKEND, "tcv", pulse, run)
imas_entry.create()
##################### Dataset description
dd = imas.dataset_description()
dd.ids_properties.homogeneous_time = 1
dd.ids_properties.comment = (
    "TCV-X21 dataset, for forward field. Data has been processed over multiple pulses and time slices, and mapped onto the distance to separatrix at outboard midplane Rsep_omp (distance_separatrix_midplane in IMAS). Due to this process, langmuir probes array indices in IMAS don"
    "t correspond to real probes but rather to a given Rsep_omp position of measurement collected over this multi-pulse dataset. Only some physical quantities for Langmuir probes are processed at a given position, e.g. electron density is not recorded at the same Rsep_omp positions as the saturation current, so they are recorded in different indices of the "
    "embedded"
    " or of the "
    "reciprocating"
    " array of structure in IMAS"
)
dd.ids_properties.source = "TCV_forward_field.nc"
dd.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
dd.dd_version = "3.33.0"
dd.time = np.array([0.0])  # Time has no meaning for this IDS
# IDS variable is filled, we write it now to the data entry
imas_entry.put(dd, 0)
##################### LFS-LP data
lfs_lp = imas.langmuir_probes()
lfs_lp.ids_properties.homogeneous_time = 1
lfs_lp.ids_properties.comment = tcv_data["LFS-LP"].diagnostic_name
lfs_lp.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
lfs_lp.time = np.array(
[0.0]
) # Time has no meaning for this dataset which is processed over several pulses and time slices
# midplane definition
lfs_lp.midplane.name = "dr_dz_zero_sep"
lfs_lp.midplane.index = 2
lfs_lp.midplane.description = "Midplane defined by the height of the outboard point on the separatrix on which dr/dz = 0 (local maximum of the major radius of the separatrix). In case of multiple local maxima, the closest one from z=z_magnetic_axis is chosen. equilibrium/time_slice/boundary_separatrix/dr_dz_zero_point/z"
# Different physical data have been gathered during different group of TCV pulses, therefore they are also measured at different locations. Typically : density is not measured at the same Rsep_omp positions as jsat. For each set of measurement positions, we create a set of indices in the "embedded" AoS, since this one assumes that a given probe is located at given position
# Get number of channels for density/electron_temp/potential group
channels_n = len(tcv_data["LFS-LP/observables/density/value"])
# Get number of channels for current/current_std group
channels_c = len(tcv_data["LFS-LP/observables/current/value"])
# Get number of channels for jsat/jsat_std/jsat_skew/jsat_kurtosis group
channels_j = len(tcv_data["LFS-LP/observables/jsat/value"])
# Get number of channels for vfloat/vfloat_std group
channels_v = len(tcv_data["LFS-LP/observables/vfloat/value"])
lfs_lp.embedded.resize(channels_n + channels_c + channels_j + channels_v)
for channel in range(channels_n):
print(channel)
# Positions
lfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[tcv_data["LFS-LP/observables/density/Rsep_omp"][channel] / 100.0]
)
# Electron density
lfs_lp.embedded[channel].n_e.data = np.array(
[tcv_data["LFS-LP/observables/density/value"][channel]]
)
lfs_lp.embedded[channel].n_e.data_error_upper = np.array(
[tcv_data["LFS-LP/observables/density/error"][channel]]
)
# Electron temperature
lfs_lp.embedded[channel].t_e.data = np.array(
[tcv_data["LFS-LP/observables/electron_temp/value"][channel]]
)
lfs_lp.embedded[channel].t_e.data_error_upper = np.array(
[tcv_data["LFS-LP/observables/electron_temp/error"][channel]]
)
# Plasma potential
lfs_lp.embedded[channel].v_plasma.data = np.array(
[tcv_data["LFS-LP/observables/potential/value"][channel]]
)
lfs_lp.embedded[channel].v_plasma.data_error_upper = np.array(
[tcv_data["LFS-LP/observables/potential/error"][channel]]
)
for channel in range(channels_n, channels_n + channels_c):
print(channel)
# Positions
lfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[tcv_data["LFS-LP/observables/current/Rsep_omp"][channel - channels_n] / 100.0]
)
# Parallel current density
lfs_lp.embedded[channel].j_i_parallel.data = np.array(
[tcv_data["LFS-LP/observables/current/value"][channel - channels_n]]
)
lfs_lp.embedded[channel].j_i_parallel.data_error_upper = np.array(
[tcv_data["LFS-LP/observables/current/error"][channel - channels_n]]
)
# Parallel current density standard deviation
lfs_lp.embedded[channel].j_i_parallel_sigma.data = np.array(
[tcv_data["LFS-LP/observables/current_std/value"][channel - channels_n]]
)
lfs_lp.embedded[channel].j_i_parallel_sigma.data_error_upper = np.array(
[tcv_data["LFS-LP/observables/current_std/error"][channel - channels_n]]
)
for channel in range(channels_n + channels_c, channels_n + channels_c + channels_j):
print(channel)
# Positions
lfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[
tcv_data["LFS-LP/observables/jsat/Rsep_omp"][
channel - channels_n - channels_c
]
/ 100.0
]
)
# Ion saturation current density
lfs_lp.embedded[channel].j_i_saturation.data = np.array(
[tcv_data["LFS-LP/observables/jsat/value"][channel - channels_n - channels_c]]
)
lfs_lp.embedded[channel].j_i_saturation.data_error_upper = np.array(
[tcv_data["LFS-LP/observables/jsat/error"][channel - channels_n - channels_c]]
)
# Ion saturation current density standard deviation
lfs_lp.embedded[channel].j_i_saturation_sigma.data = np.array(
[
tcv_data["LFS-LP/observables/jsat_std/value"][
channel - channels_n - channels_c
]
]
)
lfs_lp.embedded[channel].j_i_saturation_sigma.data_error_upper = np.array(
[
tcv_data["LFS-LP/observables/jsat_std/error"][
channel - channels_n - channels_c
]
]
)
# Ion saturation current density skew
lfs_lp.embedded[channel].j_i_saturation_skew.data = np.array(
[
tcv_data["LFS-LP/observables/jsat_skew/value"][
channel - channels_n - channels_c
]
]
)
lfs_lp.embedded[channel].j_i_saturation_skew.data_error_upper = np.array(
[
tcv_data["LFS-LP/observables/jsat_skew/error"][
channel - channels_n - channels_c
]
]
)
# Ion saturation current density kurtosis
lfs_lp.embedded[channel].j_i_saturation_kurtosis.data = np.array(
[
tcv_data["LFS-LP/observables/jsat_kurtosis/value"][
channel - channels_n - channels_c
]
]
)
lfs_lp.embedded[channel].j_i_saturation_kurtosis.data_error_upper = np.array(
[
tcv_data["LFS-LP/observables/jsat_kurtosis/error"][
channel - channels_n - channels_c
]
]
)
for channel in range(
channels_n + channels_c + channels_j,
channels_n + channels_c + channels_j + channels_v,
):
print(channel)
# Positions
lfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[
tcv_data["LFS-LP/observables/vfloat/Rsep_omp"][
channel - channels_n - channels_c - channels_j
]
/ 100.0
]
)
# Floating potential
lfs_lp.embedded[channel].v_floating.data = np.array(
[
tcv_data["LFS-LP/observables/vfloat/value"][
channel - channels_n - channels_c - channels_j
]
]
)
lfs_lp.embedded[channel].v_floating.data_error_upper = np.array(
[
tcv_data["LFS-LP/observables/vfloat/error"][
channel - channels_n - channels_c - channels_j
]
]
)
# Floating potential standard deviation
lfs_lp.embedded[channel].v_floating_sigma.data = np.array(
[
tcv_data["LFS-LP/observables/vfloat_std/value"][
channel - channels_n - channels_c - channels_j
]
]
)
lfs_lp.embedded[channel].v_floating_sigma.data_error_upper = np.array(
[
tcv_data["LFS-LP/observables/vfloat_std/error"][
channel - channels_n - channels_c - channels_j
]
]
)
# IDS variable is filled, we write it now to the data entry
imas_entry.put(lfs_lp, 0)
##################### HFS-LP data
hfs_lp = imas.langmuir_probes()
hfs_lp.ids_properties.homogeneous_time = 1
hfs_lp.ids_properties.comment = tcv_data["HFS-LP"].diagnostic_name
hfs_lp.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
hfs_lp.time = np.array(
[0.0]
) # Time has no meaning for this dataset which is processed over several pulses and time slices
# midplane definition
hfs_lp.midplane.name = "dr_dz_zero_sep"
hfs_lp.midplane.index = 2
hfs_lp.midplane.description = "Midplane defined by the height of the outboard point on the separatrix on which dr/dz = 0 (local maximum of the major radius of the separatrix). In case of multiple local maxima, the closest one from z=z_magnetic_axis is chosen. equilibrium/time_slice/boundary_separatrix/dr_dz_zero_point/z"
# Different physical data have been gathered during different group of TCV pulses, therefore they are also measured at different locations. Typically : density is not measured at the same positions as jsat. For each set of measurement positions, we create a set of indices in the "embedded" AoS, since this one assumes that a given probe is located at given position
# Get number of channels for density/electron_temp/potential group
channels_n = len(tcv_data["HFS-LP/observables/density/value"])
# Get number of channels for current/current_std group
channels_c = len(tcv_data["HFS-LP/observables/current/value"])
# Get number of channels for jsat/jsat_std/jsat_skew/jsat_kurtosis group
channels_j = len(tcv_data["HFS-LP/observables/jsat/value"])
# Get number of channels for vfloat/vfloat_std group
channels_v = len(tcv_data["HFS-LP/observables/vfloat/value"])
hfs_lp.embedded.resize(channels_n + channels_c + channels_j + channels_v)
for channel in range(channels_n):
print(channel)
# Positions
hfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[tcv_data["HFS-LP/observables/density/Rsep_omp"][channel] / 100.0]
)
# Electron density
hfs_lp.embedded[channel].n_e.data = np.array(
[tcv_data["HFS-LP/observables/density/value"][channel]]
)
hfs_lp.embedded[channel].n_e.data_error_upper = np.array(
[tcv_data["HFS-LP/observables/density/error"][channel]]
)
# Electron temperature
hfs_lp.embedded[channel].t_e.data = np.array(
[tcv_data["HFS-LP/observables/electron_temp/value"][channel]]
)
hfs_lp.embedded[channel].t_e.data_error_upper = np.array(
[tcv_data["HFS-LP/observables/electron_temp/error"][channel]]
)
# Plasma potential
hfs_lp.embedded[channel].v_plasma.data = np.array(
[tcv_data["HFS-LP/observables/potential/value"][channel]]
)
hfs_lp.embedded[channel].v_plasma.data_error_upper = np.array(
[tcv_data["HFS-LP/observables/potential/error"][channel]]
)
for channel in range(channels_n, channels_n + channels_c):
print(channel)
# Positions
hfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[tcv_data["HFS-LP/observables/current/Rsep_omp"][channel - channels_n] / 100.0]
)
# Parallel current density
hfs_lp.embedded[channel].j_i_parallel.data = np.array(
[tcv_data["HFS-LP/observables/current/value"][channel - channels_n]]
)
hfs_lp.embedded[channel].j_i_parallel.data_error_upper = np.array(
[tcv_data["HFS-LP/observables/current/error"][channel - channels_n]]
)
# Parallel current density standard deviation
hfs_lp.embedded[channel].j_i_parallel_sigma.data = np.array(
[tcv_data["HFS-LP/observables/current_std/value"][channel - channels_n]]
)
hfs_lp.embedded[channel].j_i_parallel_sigma.data_error_upper = np.array(
[tcv_data["HFS-LP/observables/current_std/error"][channel - channels_n]]
)
for channel in range(channels_n + channels_c, channels_n + channels_c + channels_j):
print(channel)
# Positions
hfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[
tcv_data["HFS-LP/observables/jsat/Rsep_omp"][
channel - channels_n - channels_c
]
/ 100.0
]
)
# Ion saturation current density
hfs_lp.embedded[channel].j_i_saturation.data = np.array(
[tcv_data["HFS-LP/observables/jsat/value"][channel - channels_n - channels_c]]
)
hfs_lp.embedded[channel].j_i_saturation.data_error_upper = np.array(
[tcv_data["HFS-LP/observables/jsat/error"][channel - channels_n - channels_c]]
)
# Ion saturation current density standard deviation
hfs_lp.embedded[channel].j_i_saturation_sigma.data = np.array(
[
tcv_data["HFS-LP/observables/jsat_std/value"][
channel - channels_n - channels_c
]
]
)
hfs_lp.embedded[channel].j_i_saturation_sigma.data_error_upper = np.array(
[
tcv_data["HFS-LP/observables/jsat_std/error"][
channel - channels_n - channels_c
]
]
)
# Ion saturation current density skew
hfs_lp.embedded[channel].j_i_saturation_skew.data = np.array(
[
tcv_data["HFS-LP/observables/jsat_skew/value"][
channel - channels_n - channels_c
]
]
)
hfs_lp.embedded[channel].j_i_saturation_skew.data_error_upper = np.array(
[
tcv_data["HFS-LP/observables/jsat_skew/error"][
channel - channels_n - channels_c
]
]
)
# Ion saturation current density kurtosis
hfs_lp.embedded[channel].j_i_saturation_kurtosis.data = np.array(
[
tcv_data["HFS-LP/observables/jsat_kurtosis/value"][
channel - channels_n - channels_c
]
]
)
hfs_lp.embedded[channel].j_i_saturation_kurtosis.data_error_upper = np.array(
[
tcv_data["HFS-LP/observables/jsat_kurtosis/error"][
channel - channels_n - channels_c
]
]
)
for channel in range(
channels_n + channels_c + channels_j,
channels_n + channels_c + channels_j + channels_v,
):
print(channel)
# Positions
hfs_lp.embedded[channel].distance_separatrix_midplane.data = np.array(
[
tcv_data["HFS-LP/observables/vfloat/Rsep_omp"][
channel - channels_n - channels_c - channels_j
]
/ 100.0
]
)
# Floating potential
hfs_lp.embedded[channel].v_floating.data = np.array(
[
tcv_data["HFS-LP/observables/vfloat/value"][
channel - channels_n - channels_c - channels_j
]
]
)
hfs_lp.embedded[channel].v_floating.data_error_upper = np.array(
[
tcv_data["HFS-LP/observables/vfloat/error"][
channel - channels_n - channels_c - channels_j
]
]
)
# Floating potential standard deviation
hfs_lp.embedded[channel].v_floating_sigma.data = np.array(
[
tcv_data["HFS-LP/observables/vfloat_std/value"][
channel - channels_n - channels_c - channels_j
]
]
)
hfs_lp.embedded[channel].v_floating_sigma.data_error_upper = np.array(
[
tcv_data["HFS-LP/observables/vfloat_std/error"][
channel - channels_n - channels_c - channels_j
]
]
)
# IDS variable is filled, we write it now to the data entry
imas_entry.put(hfs_lp, 1)
##################### LFS_IR data
# Infrared camera heat-flux profile --> camera_ir IDS, occurrence 0.
lfs_ir = imas.camera_ir()
lfs_ir.ids_properties.homogeneous_time = 1
lfs_ir.ids_properties.comment = tcv_data["LFS-IR"].diagnostic_name
lfs_ir.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
lfs_ir.time = np.array(
    [0.0]
)  # Time has no meaning for this dataset which is processed over several pulses and time slices
# midplane definition
lfs_ir.midplane.name = "dr_dz_zero_sep"
lfs_ir.midplane.index = 2
lfs_ir.midplane.description = "Midplane defined by the height of the outboard point on the separatrix on which dr/dz = 0 (local maximum of the major radius of the separatrix). In case of multiple local maxima, the closest one from z=z_magnetic_axis is chosen. equilibrium/time_slice/boundary_separatrix/dr_dz_zero_point/z"
lfs_ir.frame_analysis.resize(1)  # 1 time slice
# Position
# Whole q_parallel profile stored as arrays on a single frame; Rsep_omp is
# converted from cm (netCDF) to m (IMAS).
lfs_ir.frame_analysis[0].distance_separatrix_midplane = np.array(
    tcv_data["LFS-IR/observables/q_parallel/Rsep_omp"][:] / 100.0
)
lfs_ir.frame_analysis[0].power_flux_parallel = np.array(
    tcv_data["LFS-IR/observables/q_parallel/value"]
)
lfs_ir.frame_analysis[0].power_flux_parallel_error_upper = np.array(
    tcv_data["LFS-IR/observables/q_parallel/error"]
)
# IDS variable is filled, we write it now to the data entry
imas_entry.put(lfs_ir, 0)
##################### FHRP data
fhrp = imas.langmuir_probes()
fhrp.ids_properties.homogeneous_time = 1
fhrp.ids_properties.comment = tcv_data["FHRP"].diagnostic_name
fhrp.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
fhrp.time = np.array(
[0.0]
) # Time has no meaning for this dataset which is processed over several pulses and time slices
# midplane definition
fhrp.midplane.name = "dr_dz_zero_sep"
fhrp.midplane.index = 2
fhrp.midplane.description = "Midplane defined by the height of the outboard point on the separatrix on which dr/dz = 0 (local maximum of the major radius of the separatrix). In case of multiple local maxima, the closest one from z=z_magnetic_axis is chosen. equilibrium/time_slice/boundary_separatrix/dr_dz_zero_point/z"
# Different physical data have been gathered during different group of TCV pulses, therefore they are also measured at different locations. Typically : density is not measured at the same positions as jsat. For each set of measurement positions, we create a set of indices in the "reciprocating" AoS, since this one assumes that a given probe is located at given position
# Get number of channels for density/electron_temp/potential group
channels_n = len(tcv_data["FHRP/observables/density/value"])
# Get number of channels for jsat/jsat_std/jsat_skew/jsat_kurtosis/vfloat/vfloat_std/mach_number group
channels_j = len(tcv_data["FHRP/observables/jsat/value"])
fhrp.reciprocating.resize(channels_n + channels_j)
for channel in range(channels_n):
print(channel)
fhrp.reciprocating[channel].plunge.resize(1)
fhrp.reciprocating[channel].plunge[0].collector.resize(1)
# Positions
fhrp.reciprocating[channel].plunge[0].distance_separatrix_midplane.data = np.array(
[tcv_data["FHRP/observables/density/Rsep_omp"][channel] / 100.0]
)
# Electron density
fhrp.reciprocating[channel].plunge[0].n_e.data = np.array(
[tcv_data["FHRP/observables/density/value"][channel]]
)
fhrp.reciprocating[channel].plunge[0].n_e.data_error_upper = np.array(
[tcv_data["FHRP/observables/density/error"][channel]]
)
# Electron temperature
fhrp.reciprocating[channel].plunge[0].collector[0].t_e.data = np.array(
[tcv_data["FHRP/observables/electron_temp/value"][channel]]
)
fhrp.reciprocating[channel].plunge[0].collector[0].t_e.data_error_upper = np.array(
[tcv_data["FHRP/observables/electron_temp/error"][channel]]
)
# Plasma potential
fhrp.reciprocating[channel].plunge[0].v_plasma.data = np.array(
[tcv_data["FHRP/observables/potential/value"][channel]]
)
fhrp.reciprocating[channel].plunge[0].v_plasma.data_error_upper = np.array(
[tcv_data["FHRP/observables/potential/error"][channel]]
)
for channel in range(channels_n, channels_n + channels_j):
print(channel)
fhrp.reciprocating[channel].plunge.resize(1)
fhrp.reciprocating[channel].plunge[0].collector.resize(1)
# Positions
fhrp.reciprocating[channel].plunge[0].distance_separatrix_midplane.data = np.array(
[tcv_data["FHRP/observables/jsat/Rsep_omp"][channel - channels_n] / 100.0]
)
# Ion saturation current density
fhrp.reciprocating[channel].plunge[0].collector[0].j_i_saturation.data = np.array(
[tcv_data["FHRP/observables/jsat/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[0].collector[
0
].j_i_saturation.data_error_upper = np.array(
[tcv_data["FHRP/observables/jsat/error"][channel - channels_n]]
)
# Ion saturation current density standard deviation
fhrp.reciprocating[channel].plunge[0].collector[0].j_i_sigma.data = np.array(
[tcv_data["FHRP/observables/jsat_std/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[0].collector[
0
].j_i_sigma.data_error_upper = np.array(
[tcv_data["FHRP/observables/jsat_std/error"][channel - channels_n]]
)
# Ion saturation current density skew
fhrp.reciprocating[channel].plunge[0].collector[0].j_i_skew.data = np.array(
[tcv_data["FHRP/observables/jsat_skew/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[0].collector[
0
].j_i_skew.data_error_upper = np.array(
[tcv_data["FHRP/observables/jsat_skew/error"][channel - channels_n]]
)
# Ion saturation current density kurtosis
fhrp.reciprocating[channel].plunge[0].collector[0].j_i_kurtosis.data = np.array(
[tcv_data["FHRP/observables/jsat_kurtosis/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[0].collector[
0
].j_i_kurtosis.data_error_upper = np.array(
[tcv_data["FHRP/observables/jsat_kurtosis/error"][channel - channels_n]]
)
# Floating potential
fhrp.reciprocating[channel].plunge[0].collector[0].v_floating.data = np.array(
[tcv_data["FHRP/observables/vfloat/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[0].collector[
0
].v_floating.data_error_upper = np.array(
[tcv_data["FHRP/observables/vfloat/error"][channel - channels_n]]
)
# Floating potential standard deviation
fhrp.reciprocating[channel].plunge[0].collector[0].v_floating_sigma.data = np.array(
[tcv_data["FHRP/observables/vfloat_std/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[0].collector[
0
].v_floating_sigma.data_error_upper = np.array(
[tcv_data["FHRP/observables/vfloat_std/error"][channel - channels_n]]
)
# Mach number
fhrp.reciprocating[channel].plunge[0].mach_number_parallel.data = np.array(
[tcv_data["FHRP/observables/mach_number/value"][channel - channels_n]]
)
fhrp.reciprocating[channel].plunge[
0
].mach_number_parallel.data_error_upper = np.array(
[tcv_data["FHRP/observables/mach_number/error"][channel - channels_n]]
)
# IDS variable is filled, we write it now to the data entry
imas_entry.put(fhrp, 2)
##################### RDPA data
rdpa = imas.langmuir_probes()
rdpa.ids_properties.homogeneous_time = 1
rdpa.ids_properties.comment = tcv_data["RDPA"].diagnostic_name
rdpa.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
rdpa.time = np.array(
[0.0]
) # Time has no meaning for this dataset which is processed over several pulses and time slices
# midplane definition
rdpa.midplane.name = "dr_dz_zero_sep"
rdpa.midplane.index = 2
rdpa.midplane.description = "Midplane defined by the height of the outboard point on the separatrix on which dr/dz = 0 (local maximum of the major radius of the separatrix). In case of multiple local maxima, the closest one from z=z_magnetic_axis is chosen. equilibrium/time_slice/boundary_separatrix/dr_dz_zero_point/z"
# Different physical data have been gathered during different group of TCV pulses, therefore they are also measured at different locations. Typically : density is not measured at the same positions as jsat. For each set of measurement positions, we create a set of indices in the "reciprocating" AoS, since this one assumes that a given probe is located at given position
# Get number of channels for density/electron_temp/potential/mach_number group
channels_n = len(tcv_data["RDPA/observables/density/value"])
# Get number of channels for jsat/jsat_std/jsat_skew/jsat_kurtosis/vfloat/vfloat_std/mach_number group
channels_j = len(tcv_data["RDPA/observables/jsat/value"])
# Get number of channels for vfloat/vfloat_std group
channels_v = len(tcv_data["RDPA/observables/vfloat/value"])
rdpa.reciprocating.resize(channels_n + channels_j + channels_v)
for channel in range(channels_n):
print(channel)
rdpa.reciprocating[channel].plunge.resize(1)
rdpa.reciprocating[channel].plunge[0].collector.resize(1)
# Positions
rdpa.reciprocating[channel].plunge[0].distance_separatrix_midplane.data = np.array(
[tcv_data["RDPA/observables/density/Rsep_omp"][channel] / 100.0]
)
rdpa.reciprocating[channel].plunge[0].distance_x_point_z.data = np.array(
[tcv_data["RDPA/observables/density/Zx"][channel]]
)
# Electron density
rdpa.reciprocating[channel].plunge[0].n_e.data = np.array(
[tcv_data["RDPA/observables/density/value"][channel]]
)
rdpa.reciprocating[channel].plunge[0].n_e.data_error_upper = np.array(
[tcv_data["RDPA/observables/density/error"][channel]]
)
# Electron temperature
rdpa.reciprocating[channel].plunge[0].collector[0].t_e.data = np.array(
[tcv_data["RDPA/observables/electron_temp/value"][channel]]
)
rdpa.reciprocating[channel].plunge[0].collector[0].t_e.data_error_upper = np.array(
[tcv_data["RDPA/observables/electron_temp/error"][channel]]
)
# Plasma potential
rdpa.reciprocating[channel].plunge[0].v_plasma.data = np.array(
[tcv_data["RDPA/observables/potential/value"][channel]]
)
rdpa.reciprocating[channel].plunge[0].v_plasma.data_error_upper = np.array(
[tcv_data["RDPA/observables/potential/error"][channel]]
)
# Mach number
rdpa.reciprocating[channel].plunge[0].mach_number_parallel.data = np.array(
[tcv_data["RDPA/observables/mach_number/value"][channel]]
)
rdpa.reciprocating[channel].plunge[
0
].mach_number_parallel.data_error_upper = np.array(
[tcv_data["RDPA/observables/mach_number/error"][channel]]
)
for channel in range(channels_n, channels_n + channels_j):
print(channel)
rdpa.reciprocating[channel].plunge.resize(1)
rdpa.reciprocating[channel].plunge[0].collector.resize(1)
# Positions
rdpa.reciprocating[channel].plunge[0].distance_separatrix_midplane.data = np.array(
[tcv_data["RDPA/observables/jsat/Rsep_omp"][channel - channels_n] / 100.0]
)
rdpa.reciprocating[channel].plunge[0].distance_x_point_z.data = np.array(
[tcv_data["RDPA/observables/jsat/Zx"][channel - channels_n]]
)
# Ion saturation current density
rdpa.reciprocating[channel].plunge[0].collector[0].j_i_saturation.data = np.array(
[tcv_data["RDPA/observables/jsat/value"][channel - channels_n]]
)
rdpa.reciprocating[channel].plunge[0].collector[
0
].j_i_saturation.data_error_upper = np.array(
[tcv_data["RDPA/observables/jsat/error"][channel - channels_n]]
)
# Ion saturation current density standard deviation
rdpa.reciprocating[channel].plunge[0].collector[0].j_i_sigma.data = np.array(
[tcv_data["RDPA/observables/jsat_std/value"][channel - channels_n]]
)
rdpa.reciprocating[channel].plunge[0].collector[
0
].j_i_sigma.data_error_upper = np.array(
[tcv_data["RDPA/observables/jsat_std/error"][channel - channels_n]]
)
# Ion saturation current density skew
rdpa.reciprocating[channel].plunge[0].collector[0].j_i_skew.data = np.array(
[tcv_data["RDPA/observables/jsat_skew/value"][channel - channels_n]]
)
rdpa.reciprocating[channel].plunge[0].collector[
0
].j_i_skew.data_error_upper = np.array(
[tcv_data["RDPA/observables/jsat_skew/error"][channel - channels_n]]
)
# Ion saturation current density kurtosis
rdpa.reciprocating[channel].plunge[0].collector[0].j_i_kurtosis.data = np.array(
[tcv_data["RDPA/observables/jsat_kurtosis/value"][channel - channels_n]]
)
rdpa.reciprocating[channel].plunge[0].collector[
0
].j_i_kurtosis.data_error_upper = np.array(
[tcv_data["RDPA/observables/jsat_kurtosis/error"][channel - channels_n]]
)
for channel in range(channels_n + channels_j, channels_n + channels_j + channels_v):
print(channel)
rdpa.reciprocating[channel].plunge.resize(1)
rdpa.reciprocating[channel].plunge[0].collector.resize(1)
# Positions
rdpa.reciprocating[channel].plunge[0].distance_separatrix_midplane.data = np.array(
[
tcv_data["RDPA/observables/vfloat/Rsep_omp"][
channel - channels_n - channels_j
]
/ 100.0
]
)
rdpa.reciprocating[channel].plunge[0].distance_x_point_z.data = np.array(
[tcv_data["RDPA/observables/vfloat/Zx"][channel - channels_n - channels_j]]
)
# Floating potential
rdpa.reciprocating[channel].plunge[0].collector[0].v_floating.data = np.array(
[tcv_data["RDPA/observables/vfloat/value"][channel - channels_n - channels_j]]
)
rdpa.reciprocating[channel].plunge[0].collector[
0
].v_floating.data_error_upper = np.array(
[tcv_data["RDPA/observables/vfloat/error"][channel - channels_n - channels_j]]
)
# Floating potential standard deviation
rdpa.reciprocating[channel].plunge[0].collector[0].v_floating_sigma.data = np.array(
[
tcv_data["RDPA/observables/vfloat_std/value"][
channel - channels_n - channels_j
]
]
)
rdpa.reciprocating[channel].plunge[0].collector[
0
].v_floating_sigma.data_error_upper = np.array(
[
tcv_data["RDPA/observables/vfloat_std/error"][
channel - channels_n - channels_j
]
]
)
# IDS variable is filled, we write it now to the data entry
imas_entry.put(rdpa, 3)
##################### TS data
# Thomson scattering --> thomson_scattering IDS, occurrence 0.
ts = imas.thomson_scattering()
ts.ids_properties.homogeneous_time = 1
ts.ids_properties.comment = tcv_data["TS"].diagnostic_name
ts.ids_properties.provider = "F. Imbeaux (for the IMAS conversion)"
ts.time = np.array(
    [0.0]
)  # Time has no meaning for this dataset which is processed over several pulses and time slices
# midplane definition
ts.midplane.name = "dr_dz_zero_sep"
ts.midplane.index = 2
ts.midplane.description = "Midplane defined by the height of the outboard point on the separatrix on which dr/dz = 0 (local maximum of the major radius of the separatrix). In case of multiple local maxima, the closest one from z=z_magnetic_axis is chosen. equilibrium/time_slice/boundary_separatrix/dr_dz_zero_point/z"
# Get number of channels for density/electron_temp group
channels_n = len(tcv_data["TS/observables/density/value"])
ts.channel.resize(channels_n)
for channel in range(channels_n):
    print(channel)
    # BUG FIX: all reads in this loop previously used the "RDPA/..." netCDF
    # groups (copy-paste from the RDPA section) even though channels_n is
    # sized from "TS/...". Read the Thomson-scattering group instead.
    # Positions (Rsep_omp converted from cm in the netCDF file to m in IMAS)
    ts.channel[channel].distance_separatrix_midplane.data = np.array(
        [tcv_data["TS/observables/density/Rsep_omp"][channel] / 100.0]
    )
    # Electron density
    ts.channel[channel].n_e.data = np.array(
        [tcv_data["TS/observables/density/value"][channel]]
    )
    ts.channel[channel].n_e.data_error_upper = np.array(
        [tcv_data["TS/observables/density/error"][channel]]
    )
    # Electron temperature
    ts.channel[channel].t_e.data = np.array(
        [tcv_data["TS/observables/electron_temp/value"][channel]]
    )
    ts.channel[channel].t_e.data_error_upper = np.array(
        [tcv_data["TS/observables/electron_temp/error"][channel]]
    )
# IDS variable is filled, we write it now to the data entry
imas_entry.put(ts, 0)
imas_entry.close()
| 42.931964
| 375
| 0.696119
| 4,578
| 33,444
| 4.882918
| 0.051114
| 0.040709
| 0.048314
| 0.067639
| 0.908383
| 0.8955
| 0.863872
| 0.834839
| 0.797978
| 0.757448
| 0
| 0.008116
| 0.189451
| 33,444
| 778
| 376
| 42.987147
| 0.816512
| 0.172527
| 0
| 0.214516
| 0
| 0.012903
| 0.248358
| 0.15993
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006452
| 0
| 0.006452
| 0.022581
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a1f5ce25d56fce1434097eb25703f8dddf5eb1cb
| 166
|
py
|
Python
|
umaster/mooncell_text/text_repository/__init__.py
|
karanokk/mooncell-text
|
92d4bbb7b8dac8a1e59729e8ec45f6c3374de246
|
[
"MIT"
] | null | null | null |
umaster/mooncell_text/text_repository/__init__.py
|
karanokk/mooncell-text
|
92d4bbb7b8dac8a1e59729e8ec45f6c3374de246
|
[
"MIT"
] | null | null | null |
umaster/mooncell_text/text_repository/__init__.py
|
karanokk/mooncell-text
|
92d4bbb7b8dac8a1e59729e8ec45f6c3374de246
|
[
"MIT"
] | null | null | null |
from .local_source import LocalTextSource
from .remote_source import RemoteTextSource
from .text_repository import TextRepository
from .text_source import TextSource
| 33.2
| 43
| 0.879518
| 20
| 166
| 7.1
| 0.55
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 166
| 4
| 44
| 41.5
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62c3a6c88c250daea6f384881f48b8481c226591
| 4,630
|
py
|
Python
|
tests/task_router/test_tasks.py
|
quippp/twilio-python
|
22b84cdfd19a6b1bde84350053870a7c507af410
|
[
"MIT"
] | 11
|
2016-01-23T04:38:23.000Z
|
2017-11-19T04:03:25.000Z
|
venv/lib/python2.7/site-packages/tests/task_router/test_tasks.py
|
jideobs/twilioAngular
|
eb95308d287d7dbb72fe516a633199a0af8b76b9
|
[
"MIT"
] | 1
|
2016-05-26T21:39:12.000Z
|
2016-05-26T21:39:14.000Z
|
venv/lib/python2.7/site-packages/tests/task_router/test_tasks.py
|
jideobs/twilioAngular
|
eb95308d287d7dbb72fe516a633199a0af8b76b9
|
[
"MIT"
] | 2
|
2019-05-19T06:02:26.000Z
|
2020-12-23T11:27:20.000Z
|
import unittest
from mock import patch, Mock
from tests.tools import create_mock_json
from twilio.rest.resources.task_router.tasks import Tasks, Task
# Shared fixtures for the tests below: dummy account credentials, the
# TaskRouter workspace base URL, and a Task SID used by instance-level tests.
AUTH = ("AC123", "token")
BASE_URI = "https://taskrouter.twilio.com/v1/Accounts/AC123/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
TASK_SID = "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
class TaskTest(unittest.TestCase):
    """Unit tests for the TaskRouter Tasks list resource and Task instances.

    Every test patches make_twilio_request so no real HTTP traffic occurs,
    then asserts the exact request the resource would have issued.
    """

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_create(self, mock_request):
        # A 201 response with a canned task payload.
        response = create_mock_json('tests/resources/task_router/tasks_instance.json')
        response.status_code = 201
        mock_request.return_value = response

        tasks = Tasks(BASE_URI, AUTH)
        tasks.create("attributes", "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", timeout=60)

        expected_data = {
            'Attributes': "attributes",
            'WorkflowSid': "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            'Timeout': 60
        }
        mock_request.assert_called_with("POST", "{0}/Tasks".format(BASE_URI),
                                        data=expected_data, auth=AUTH,
                                        use_json_extension=False)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_delete_instance(self, mock_request):
        # DELETE returns an empty 204 body.
        response = Mock()
        response.content = ""
        response.status_code = 204
        mock_request.return_value = response

        expected_uri = "{0}/Tasks/{1}".format(BASE_URI, TASK_SID)
        tasks = Tasks(BASE_URI, AUTH)
        task = Task(tasks, TASK_SID)
        task.delete()

        mock_request.assert_called_with("DELETE", expected_uri, auth=AUTH,
                                        use_json_extension=False)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_delete_list(self, mock_request):
        # Deleting through the list resource hits the same instance URI.
        response = Mock()
        response.content = ""
        response.status_code = 204
        mock_request.return_value = response

        expected_uri = "{0}/Tasks/{1}".format(BASE_URI, TASK_SID)
        tasks = Tasks(BASE_URI, AUTH)
        tasks.delete(TASK_SID)

        mock_request.assert_called_with("DELETE", expected_uri, auth=AUTH,
                                        use_json_extension=False)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_get(self, mock_request):
        response = create_mock_json('tests/resources/task_router/tasks_instance.json')
        response.status_code = 200
        mock_request.return_value = response

        expected_uri = "{0}/Tasks/{1}".format(BASE_URI, TASK_SID)
        tasks = Tasks(BASE_URI, AUTH)
        tasks.get(TASK_SID)

        mock_request.assert_called_with("GET", expected_uri, auth=AUTH,
                                        use_json_extension=False)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_list(self, mock_request):
        response = create_mock_json('tests/resources/task_router/tasks_list.json')
        response.status_code = 200
        mock_request.return_value = response

        expected_uri = "{0}/Tasks".format(BASE_URI)
        tasks = Tasks(BASE_URI, AUTH)
        tasks.list()

        mock_request.assert_called_with("GET", expected_uri, params={}, auth=AUTH,
                                        use_json_extension=False)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_update_instance(self, mock_request):
        response = create_mock_json('tests/resources/task_router/tasks_instance.json')
        response.status_code = 201
        mock_request.return_value = response

        expected_uri = "{0}/Tasks/{1}".format(BASE_URI, TASK_SID)
        tasks = Tasks(BASE_URI, AUTH)
        task = Task(tasks, TASK_SID)
        task.update(attributes='attributes', workflow_sid='WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')

        expected_data = {
            'Attributes': "attributes",
            'WorkflowSid': "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        }
        mock_request.assert_called_with("POST", expected_uri, data=expected_data, auth=AUTH,
                                        use_json_extension=False)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_update_list(self, mock_request):
        # Updating through the list resource (SID passed explicitly).
        response = create_mock_json('tests/resources/task_router/tasks_instance.json')
        response.status_code = 201
        mock_request.return_value = response

        expected_uri = "{0}/Tasks/{1}".format(BASE_URI, TASK_SID)
        tasks = Tasks(BASE_URI, AUTH)
        tasks.update(TASK_SID, attributes='attributes', workflow_sid='WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')

        expected_data = {
            'Attributes': "attributes",
            'WorkflowSid': "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        }
        mock_request.assert_called_with("POST", expected_uri, data=expected_data, auth=AUTH,
                                        use_json_extension=False)
| 39.237288
| 114
| 0.641469
| 519
| 4,630
| 5.460501
| 0.136802
| 0.03705
| 0.053634
| 0.05928
| 0.828158
| 0.792167
| 0.741708
| 0.741708
| 0.727594
| 0.727594
| 0
| 0.01268
| 0.25054
| 4,630
| 117
| 115
| 39.57265
| 0.804035
| 0
| 0
| 0.621053
| 0
| 0
| 0.245572
| 0.170842
| 0
| 0
| 0
| 0
| 0.073684
| 1
| 0.073684
| false
| 0
| 0.042105
| 0
| 0.126316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
62ef97b124312a957f060f5cf6d6eaf221adf4cb
| 100
|
py
|
Python
|
django_fakery/plugin.py
|
fcurella/django-fakery
|
10f962ad73bd689495bdc04f521eddce4cb01922
|
[
"MIT"
] | 99
|
2015-09-25T19:19:31.000Z
|
2022-03-10T11:48:26.000Z
|
django_fakery/plugin.py
|
sobolevn/django-fakery
|
bac6965d2f780ddbad90ef94ab5b6bb8a79a1e1c
|
[
"MIT"
] | 53
|
2015-10-08T11:54:27.000Z
|
2022-01-11T17:28:09.000Z
|
django_fakery/plugin.py
|
sobolevn/django-fakery
|
bac6965d2f780ddbad90ef94ab5b6bb8a79a1e1c
|
[
"MIT"
] | 6
|
2015-10-08T11:46:11.000Z
|
2022-01-15T16:29:58.000Z
|
import pytest
from django_fakery import factory
@pytest.fixture
def fakery():
    """Expose the module-level django-fakery ``factory`` to tests as a fixture."""
    return factory
| 11.111111
| 33
| 0.77
| 13
| 100
| 5.846154
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18
| 100
| 8
| 34
| 12.5
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
62fab58097f5040e121f7e725e3d2180e88b333c
| 108,839
|
py
|
Python
|
datasets.py
|
fregu856/retinanet
|
408cc34aac9a30233ac3a23661654997d0cd5641
|
[
"MIT"
] | null | null | null |
datasets.py
|
fregu856/retinanet
|
408cc34aac9a30233ac3a23661654997d0cd5641
|
[
"MIT"
] | 1
|
2019-09-15T11:18:53.000Z
|
2019-09-15T11:22:59.000Z
|
datasets.py
|
fregu856/retinanet
|
408cc34aac9a30233ac3a23661654997d0cd5641
|
[
"MIT"
] | null | null | null |
from kittiloader import LabelLoader2D3D, LabelLoader2D3D_sequence # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)
import sys
sys.path.append("/root/retinanet/data_aug")
sys.path.append("/home/fregu856/retinanet/data_aug")
from data_aug import RandomHorizontalFlip, RandomHSV, RandomScale, RandomTranslate, Resize
import torch
import torch.utils.data
import torch.nn.functional as F
from torch.autograd import Variable
import pickle
import numpy as np
import cv2
import math
import os
# Map KITTI object class names to integer training labels; label 0 is
# reserved for the background class.
class_string_to_label = {"Car": 1,
                         "Pedestrian": 2,
                         "Cyclist": 3} # (background: 0)
################################################################################
# debug visualization helper functions START
################################################################################
def create2Dbbox_poly(bbox2D):
    """Build a 4-corner polygon dict from a 2D bbox.

    bbox2D is indexable as (u_min, u_max, v_min, v_max), i.e.
    (left, right, top, bottom). Returns {'poly': int32 array of shape (4, 2)}
    listing the corners clockwise from top-left.
    """
    left, right = bbox2D[0], bbox2D[1]
    top, bottom = bbox2D[2], bbox2D[3]
    corners = [[left, top], [right, top],
               [right, bottom], [left, bottom]]
    return {'poly': np.array(corners, dtype='int32')}
def draw_2d_polys_no_text(img, polys):
    """Draw each polygon outline onto a copy of img and return the copy.

    The input image is left untouched. A poly dict's optional 'color' entry
    overrides the default green outline.
    """
    canvas = np.copy(img)
    for poly in polys:
        if 'color' in poly:
            outline_color = poly['color']
        else:
            outline_color = np.array([0, 255, 0], dtype='float64')
        cv2.polylines(canvas, np.int32([poly['poly']]), True, outline_color,
                      lineType=cv2.LINE_AA, thickness=2)
    return canvas
################################################################################
# debug visualization helper functions END
################################################################################
def bboxes_xxyyc_2_xyxyc(bboxes_xxyyc):
    """Reorder bbox columns (x_min, x_max, y_min, y_max, class_label) ->
    (x_min, y_min, x_max, y_max, class_label).

    bboxes_xxyyc is an array of shape (num_bboxes, 5). Fancy integer indexing
    returns a new array (a copy, same dtype), just like the original
    element-wise assignment did.
    """
    column_order = [0, 2, 1, 3, 4]
    return bboxes_xxyyc[:, column_order]
def bboxes_xyxyc_2_xxyyc(bboxes_xyxyc):
    """Reorder bbox columns (x_min, y_min, x_max, y_max, class_label) ->
    (x_min, x_max, y_min, y_max, class_label).

    Inverse of bboxes_xxyyc_2_xyxyc (the column permutation is its own
    inverse). bboxes_xyxyc is an array of shape (num_bboxes, 5); a new array
    with the same dtype is returned.
    """
    column_order = [0, 2, 1, 3, 4]
    return bboxes_xyxyc[:, column_order]
class BboxEncoder:
    """Anchor-box encoder/decoder for a RetinaNet-style detector.

    Encodes ground-truth bboxes into per-anchor regression and classification
    targets, and decodes network outputs back into scored, NMS-filtered
    bboxes. All bbox tensors use (x, y, w, h) center format unless noted.

    NOTE! based off of https://github.com/kuangliu/pytorch-retinanet/blob/master/encoder.py and https://github.com/kuangliu/pytorch-retinanet/blob/master/utils.py
    """

    def __init__(self, img_h, img_w):
        """Precompute the fixed anchor grid for an img_h x img_w input."""
        # base anchor areas, one per feature-map level:
        self.anchor_areas = [32.0*32.0, 64.0*64.0, 128.0*128.0, 256.0*256.0, 512.0*512.0]
        self.aspect_ratios = [0.5, 1.0, 2.0]
        self.scale_ratios = [1.0, pow(2, 1.0/3.0), pow(2, 2.0/3.0)]

        self.nms_thresh = 0.5    # IoU above which overlapping preds are suppressed in decode
        self.conf_thresh = 0.25  # minimum class score for a pred to survive decode

        self.img_h = img_h
        self.img_w = img_w
        self.img_size = torch.Tensor([self.img_w, self.img_h])

        self.anchors_per_cell = 9 # (3 aspect ratios * 3 scale ratios)
        self.num_feature_maps = len(self.anchor_areas)

        # feature map i has spatial size ceil(img_size / 2**(i+3)), i.e. the
        # strides are 8, 16, 32, 64 and 128 pixels:
        self.feature_map_sizes = [(self.img_size/pow(2.0, i+3)).ceil() for i in range(self.num_feature_maps)]

        self.anchor_sizes = self._get_anchor_sizes() # (Tensor of shape: (num_feature_maps, anchors_per_cell, 2)) (w, h)
        self.anchor_bboxes = self._get_anchor_bboxes() # (Tensor of shape: (num_anchors, 4), (x, y, w, h), where num_anchors == fm1_h*fm1_w*anchors_per_cell + ... + fmN_h*fmN_w*anchors_per_cell)
        self.num_anchors = self.anchor_bboxes.size(0) # (total number of anchor bboxes)
        # (BUGFIX: removed a leftover debug `print(self.num_anchors)` here)

    def _get_anchor_sizes(self):
        """Return anchor (w, h) sizes, shape (num_feature_maps, anchors_per_cell, 2)."""
        anchor_sizes = []
        for area in self.anchor_areas:
            for aspect_ratio in self.aspect_ratios:
                # h*w == area and w/h == aspect_ratio:
                h = math.sqrt(area/aspect_ratio)
                w = aspect_ratio*h
                for scale_ratio in self.scale_ratios:
                    anchor_h = scale_ratio*h
                    anchor_w = scale_ratio*w
                    anchor_sizes.append([anchor_w, anchor_h])
        anchor_sizes = torch.Tensor(anchor_sizes).view(self.num_feature_maps, self.anchors_per_cell, 2)
        return anchor_sizes

    def _mesh_grid(self, x, y):
        """Return a FloatTensor of shape (x*y, 2) listing all (col, row) grid indices.

        _mesh_grid(3, 2):
        0 0
        1 0
        2 0
        0 1
        1 1
        2 1
        """
        x_range = torch.arange(0, x) # (Tensor of shape (x, ): (0, 1, 2,..., x-1))
        y_range = torch.arange(0, y) # (Tensor of shape (y, ): (0, 1, 2,..., y-1))
        xx = x_range.repeat(y).view(-1, 1) # (Tensor of shape: (x*y, 1). x == 3, y == 2: xx == (0, 1, 2, 0, 1, 2))
        yy = y_range.view(-1, 1).repeat(1, x).view(-1, 1) # (Tensor of shape: (x*y, 1). x == 3, y == 2: yy == (0, 0, 0, 1, 1, 1))
        mesh_grid = torch.cat([xx, yy], 1) # (Tensor of shape: (x*y, 2). mesh_grid[:, 0] == xx, mesh_grid[:, 1] == yy)
        mesh_grid = mesh_grid.type(torch.FloatTensor)
        return mesh_grid

    def _get_anchor_bboxes(self):
        """Return all anchor bboxes, shape (num_anchors, 4), (x, y, w, h).

        num_anchors == fm1_h*fm1_w*anchors_per_cell + ... + fmN_h*fmN_w*anchors_per_cell.
        """
        anchor_bboxes = []
        for i in range(self.num_feature_maps):
            fm_size = self.feature_map_sizes[i]
            grid_cell_size = self.img_size/fm_size # (Tensor of shape (2, ), (cell_w, cell_h))
            fm_w = int(fm_size[0])
            fm_h = int(fm_size[1])

            # anchor centers sit at the center of each grid cell, in pixels:
            grid_cell_centers = self._mesh_grid(fm_w, fm_h) + 0.5 # (shape: (fm_w*fm_h, 2))
            grid_cell_pixel_centers = grid_cell_size*grid_cell_centers # (shape: (fm_w*fm_h, 2))

            # broadcast the per-cell center to all anchors_per_cell anchors:
            anchor_bboxes_x_y = grid_cell_pixel_centers.view(fm_h, fm_w, 1, 2) # (shape: (fm_h, fm_w, 1, 2))
            anchor_bboxes_x_y = anchor_bboxes_x_y.expand(fm_h, fm_w, self.anchors_per_cell, 2) # (shape: (fm_h, fm_w, anchors_per_cell, 2))

            # broadcast this level's anchor sizes to every grid cell
            # (anchor_bboxes_w_h[i, j, :, :] are the same for all i, j):
            anchor_bboxes_w_h = self.anchor_sizes[i].view(1, 1, self.anchors_per_cell, 2) # (shape: (1, 1, anchors_per_cell, 2))
            anchor_bboxes_w_h = anchor_bboxes_w_h.expand(fm_h, fm_w, self.anchors_per_cell, 2) # (shape: (fm_h, fm_w, anchors_per_cell, 2))

            anchor_bboxes_x_y_w_h = torch.cat([anchor_bboxes_x_y, anchor_bboxes_w_h], 3) # (shape: (fm_h, fm_w, anchors_per_cell, 4), (x, y, w, h))
            anchor_bboxes_x_y_w_h = anchor_bboxes_x_y_w_h.view(-1, 4) # (shape: (fm_h*fm_w*anchors_per_cell, 4), (x, y, w, h))

            anchor_bboxes.append(anchor_bboxes_x_y_w_h)

        anchor_bboxes = torch.cat(anchor_bboxes, 0) # (shape: (num_anchors, 4), (x, y, w, h))
        return anchor_bboxes

    def _xxyy_2_xywh(self, bboxes):
        """Convert bboxes (num_bboxes, 4) from (x_min, x_max, y_min, y_max) to (x, y, w, h)."""
        x_min = bboxes[:, 0]
        x_max = bboxes[:, 1]
        y_min = bboxes[:, 2]
        y_max = bboxes[:, 3]

        w = x_max - x_min
        h = y_max - y_min
        x = x_min + w/2.0
        y = y_min + h/2.0

        bboxes = torch.cat([x.view(-1, 1), y.view(-1, 1), w.view(-1, 1), h.view(-1, 1)], 1) # (shape: (num_bboxes, 4), (x, y, w, h))
        return bboxes

    def _xywh_2_xxyy(self, bboxes):
        """Convert bboxes (num_bboxes, 4) from (x, y, w, h) to (x_min, x_max, y_min, y_max)."""
        x = bboxes[:, 0]
        y = bboxes[:, 1]
        w = bboxes[:, 2]
        h = bboxes[:, 3]

        x_min = x - w/2.0
        x_max = x + w/2.0
        y_min = y - h/2.0
        y_max = y + h/2.0

        bboxes = torch.cat([x_min.view(-1, 1), x_max.view(-1, 1), y_min.view(-1, 1), y_max.view(-1, 1)], 1) # (shape: (num_bboxes, 4), (x_min, x_max, y_min, y_max))
        return bboxes

    def _bboxes_ious(self, anchor_bboxes, gt_bboxes):
        """Pairwise IoUs; ious[i, j] is the IoU of anchor bbox i with gt bbox j.

        anchor_bboxes: Tensor (num_anchors, 4), (x, y, w, h)
        gt_bboxes: Tensor (num_gt_objects, 4), (x, y, w, h)
        Returns shape (num_anchors, num_gt_objects).
        """
        intersect_xmax = np.minimum(anchor_bboxes[:, None, 0] + 0.5*anchor_bboxes[:, None, 2],
                                    gt_bboxes[:, 0] + 0.5*gt_bboxes[:, 2]) # (shape (num_anchors, num_gt_objects))
        intersect_xmin = np.maximum(anchor_bboxes[:, None, 0] - 0.5*anchor_bboxes[:, None, 2],
                                    gt_bboxes[:, 0] - 0.5*gt_bboxes[:, 2]) # (shape (num_anchors, num_gt_objects))
        intersect_ymax = np.minimum(anchor_bboxes[:, None, 1] + 0.5*anchor_bboxes[:, None, 3],
                                    gt_bboxes[:, 1] + 0.5*gt_bboxes[:, 3]) # (shape (num_anchors, num_gt_objects))
        intersect_ymin = np.maximum(anchor_bboxes[:, None, 1] - 0.5*anchor_bboxes[:, None, 3],
                                    gt_bboxes[:, 1] - 0.5*gt_bboxes[:, 3]) # (shape (num_anchors, num_gt_objects))

        # clamp to zero so non-overlapping pairs get zero intersection:
        zeros = torch.zeros(intersect_xmin.size()) # (shape (num_anchors, num_gt_object))
        intersect_w = torch.max(zeros, intersect_xmax - intersect_xmin) # (shape (num_anchors, num_gt_object))
        intersect_h = torch.max(zeros, intersect_ymax - intersect_ymin) # (shape (num_anchors, num_gt_object))

        intersection_area = intersect_w*intersect_h # (shape (num_anchors, num_gt_object))
        union_area = anchor_bboxes[:, None, 2]*anchor_bboxes[:, None, 3] + gt_bboxes[:, 2]*gt_bboxes[:, 3] - intersection_area # (shape (num_anchors, num_gt_object))

        ious = intersection_area/union_area # (shape (num_anchors, num_gt_object))
        return ious

    def _batch_ious(self, boxes, box):
        """IoU of a single box with each row of boxes; ious[i] == IoU(box, boxes[i]).

        boxes: Tensor (num_boxes, 4), (x, y, w, h); box: Tensor (4, ).
        Returns shape (num_boxes, ).
        """
        intersect_xmax = np.minimum(boxes[:, 0] + 0.5*boxes[:, 2], box[0] + 0.5*box[2]) # (shape (num_boxes, ))
        intersect_xmin = np.maximum(boxes[:, 0] - 0.5*boxes[:, 2], box[0] - 0.5*box[2]) # (shape (num_boxes, ))
        intersect_ymax = np.minimum(boxes[:, 1] + 0.5*boxes[:, 3], box[1] + 0.5*box[3]) # (shape (num_boxes, ))
        intersect_ymin = np.maximum(boxes[:, 1] - 0.5*boxes[:, 3], box[1] - 0.5*box[3]) # (shape (num_boxes, ))

        zeros = torch.zeros(intersect_xmin.size()) # (shape (num_boxes, ))
        intersect_w = torch.max(zeros, intersect_xmax - intersect_xmin) # (shape (num_boxes, ))
        intersect_h = torch.max(zeros, intersect_ymax - intersect_ymin) # (shape (num_boxes, ))

        intersection_area = intersect_w*intersect_h # (shape (num_boxes, ))
        union_area = boxes[:, 2]*boxes[:, 3] + box[2]*box[3] - intersection_area # (shape (num_boxes, ))

        ious = intersection_area/union_area # (shape (num_boxes, ))
        return ious

    def _bbox_nms(self, bboxes, scores):
        """Greedy non-maximum suppression; returns indices of kept bboxes.

        NOTE! based off of function from github.com/BichenWuUCB/squeezeDet
        (as in that reference, already-suppressed boxes may still suppress
        lower-scored boxes).
        bboxes: (num_bboxes, 4), (x, y, w, h); scores: (num_bboxes, ).
        """
        num_bboxes = bboxes.size(0)

        # get indices in descending order according to score:
        _, order = scores.sort(0, descending=True) # (shape: (num_bboxes, ))

        keep = torch.ones((order.size(0), )).type(torch.LongTensor)
        for i in range(num_bboxes-1):
            # suppress every lower-scored box overlapping box order[i] too much:
            ious = self._batch_ious(bboxes[order[i+1:]], bboxes[order[i]])
            for j, iou in enumerate(ious):
                if iou > self.nms_thresh:
                    keep[order[j+i+1]] = 0

        keep_inds = keep.nonzero() # (shape: (num_bboxes_after_nms, 1))
        keep_inds = keep_inds.squeeze() # (shape: (num_bboxes_after_nms, ))
        return keep_inds

    def encode(self, gt_bboxes, gt_classes):
        """Compute per-anchor regression and classification targets.

        gt_bboxes: Tensor (num_gt_objects, 4), (x_min, x_max, y_min, y_max)
        gt_classes: Tensor (num_gt_objects, )
        Returns (labels_regr, labels_class): shapes (num_anchors, 4) (x, y, w, h
        residuals) and (num_anchors, ); class entries lie in
        {-1, 0, 1,..., num_classes-1}, where -1 marks anchors that should be
        ignored during training and 0 is background.
        """
        gt_bboxes = self._xxyy_2_xywh(gt_bboxes) # (shape: (num_gt_objects, 4), (x, y, w, h))

        # compute the IoU of each anchor bbox with each gt bbox:
        ious = self._bboxes_ious(self.anchor_bboxes, gt_bboxes) # (shape (num_anchors, num_gt_object))

        # for each anchor bbox, get the maximum IoU and the index of the corresponding gt bbox:
        max_ious, max_inds = ious.max(1) # (both have shape: (num_anchors, ))

        # for each anchor bbox, the gt bbox closest to it in terms of IoU:
        assigned_gt_bboxes = gt_bboxes[max_inds] # (shape: (num_anchors, 4), (x, y, w, h))

        # target_x = (gt_x - anchor_x)/anchor_w:
        target_x = (assigned_gt_bboxes[:, 0] - self.anchor_bboxes[:, 0])/self.anchor_bboxes[:, 2] # (shape (num_anchors, ))
        target_x = target_x.view(-1, 1) # (shape (num_anchors, 1))
        # target_y = (gt_y - anchor_y)/anchor_h:
        target_y = (assigned_gt_bboxes[:, 1] - self.anchor_bboxes[:, 1])/self.anchor_bboxes[:, 3]
        target_y = target_y.view(-1, 1)
        # target_w = log(gt_w/anchor_w):
        target_w = torch.log(assigned_gt_bboxes[:, 2]/self.anchor_bboxes[:, 2])
        target_w = target_w.view(-1, 1)
        # target_h = log(gt_h/anchor_h):
        target_h = torch.log(assigned_gt_bboxes[:, 3]/self.anchor_bboxes[:, 3])
        target_h = target_h.view(-1, 1)
        labels_regr = torch.cat([target_x, target_y, target_w, target_h], 1) # (shape (num_anchors, 4), (x, y, w, h))

        # for each anchor bbox, the class label of the best-matching gt bbox:
        assigned_gt_classes = gt_classes[max_inds] # (shape (num_anchors, ))
        # anchors with maximum IoU < 0.4 are background (0):
        assigned_gt_classes[max_ious < 0.4] = 0
        # anchors with 0.4 <= maximum IoU < 0.5 are ignored during training (-1):
        ignore_inds = (max_ious >= 0.4) & (max_ious < 0.5)
        assigned_gt_classes[ignore_inds] = -1
        labels_class = assigned_gt_classes # (shape (num_anchors, ), entries in {-1, 0, 1,..., num_classes-1})

        return (labels_regr, labels_class)

    def decode(self, outputs_regr, outputs_class):
        """Turn raw network outputs into scored, NMS-filtered bboxes.

        outputs_regr: (num_anchors, 4), (x, y, w, h) residuals
        outputs_class: (num_anchors, num_classes) logits
        Returns (pred_bboxes, pred_max_scores, pred_class_labels) with shapes
        (num_preds_after_nms, 4)/(num_preds_after_nms, )/(num_preds_after_nms, ),
        or (None, None, None) if no prediction survives the background /
        confidence filtering.
        """
        # for each anchor bbox, get the pred class label and the corresponding pred score:
        pred_scores = F.softmax(Variable(outputs_class), dim=1).data # (shape (num_anchors, num_classes))
        pred_max_scores, pred_class_labels = torch.max(pred_scores, 1) # (both have shape (num_anchors, ))

        # get the indices of all pred non-background bboxes:
        keep_inds = pred_class_labels != 0 # (shape (num_anchors, ), entries in {0, 1})
        keep_inds = keep_inds.nonzero() # (shape (num_foreground_preds, 1))
        keep_inds = keep_inds.squeeze() # (shape (num_foreground_preds, ))

        # get all pred non-background bboxes:
        outputs_regr = outputs_regr[keep_inds] # (shape (num_foreground_preds, 4), (x, y, w, h))
        anchor_bboxes = self.anchor_bboxes[keep_inds] # (shape (num_foreground_preds, 4), (x, y, w, h))
        pred_max_scores = pred_max_scores[keep_inds] # (shape (num_foreground_preds, ))
        pred_class_labels = pred_class_labels[keep_inds] # (shape (num_foreground_preds, ))

        # (if exactly one pred survived, squeeze dropped the batch dim; restore it)
        if outputs_regr.size() == torch.Size([4]):
            outputs_regr = outputs_regr.unsqueeze(0)
            anchor_bboxes = anchor_bboxes.unsqueeze(0)
            pred_max_scores = torch.from_numpy(np.array([pred_max_scores.data]))
            pred_class_labels = torch.from_numpy(np.array([pred_class_labels.data]))

        if outputs_regr.size(0) > 0:
            # get the indices for all pred bboxes with a large enough pred class score:
            keep_inds = pred_max_scores > self.conf_thresh # (shape (num_foreground_preds, ), entries in {0, 1})
            keep_inds = keep_inds.nonzero() # (shape (num_preds_before_nms, 1))
            keep_inds = keep_inds.squeeze() # (shape (num_preds_before_nms, ))

            # get all pred bboxes with a large enough pred class score:
            outputs_regr = outputs_regr[keep_inds] # (shape (num_preds_before_nms, 4), (x, y, w, h))
            anchor_bboxes = anchor_bboxes[keep_inds] # (shape (num_preds_before_nms, 4), (x, y, w, h))
            pred_max_scores = pred_max_scores[keep_inds] # (shape (num_preds_before_nms, ))
            pred_class_labels = pred_class_labels[keep_inds] # (shape (num_preds_before_nms, ))

            # (again restore the batch dim if only one pred survived)
            if outputs_regr.size() == torch.Size([4]):
                outputs_regr = outputs_regr.unsqueeze(0)
                anchor_bboxes = anchor_bboxes.unsqueeze(0)
                pred_max_scores = torch.from_numpy(np.array([pred_max_scores.data]))
                pred_class_labels = torch.from_numpy(np.array([pred_class_labels.data]))

            if outputs_regr.size(0) > 0:
                # pred_x = anchor_w*output_x + anchor_x:
                pred_x = anchor_bboxes[:, 2]*outputs_regr[:, 0] + anchor_bboxes[:, 0] # (shape (num_preds_before_nms, ))
                pred_x = pred_x.view(-1, 1) # (shape (num_preds_before_nms, 1))
                # pred_y = anchor_h*output_y + anchor_y:
                pred_y = anchor_bboxes[:, 3]*outputs_regr[:, 1] + anchor_bboxes[:, 1]
                pred_y = pred_y.view(-1, 1)
                # pred_w = exp(output_w)*anchor_w:
                pred_w = torch.exp(outputs_regr[:, 2])*anchor_bboxes[:, 2]
                pred_w = pred_w.view(-1, 1)
                # pred_h = exp(output_h)*anchor_h:
                pred_h = torch.exp(outputs_regr[:, 3])*anchor_bboxes[:, 3]
                pred_h = pred_h.view(-1, 1)
                pred_bboxes = torch.cat([pred_x, pred_y, pred_w, pred_h], 1) # (shape (num_preds_before_nms, 4), (x, y, w, h))

                # filter bboxes by performing nms:
                keep_inds = self._bbox_nms(pred_bboxes, pred_max_scores) # (shape: (num_preds_after_nms, ))
                pred_bboxes = pred_bboxes[keep_inds]
                pred_max_scores = pred_max_scores[keep_inds]
                pred_class_labels = pred_class_labels[keep_inds]

                return (pred_bboxes, pred_max_scores, pred_class_labels)
            else:
                return (None, None, None)
        else:
            return (None, None, None)

    def decode_gt_single(self, labels_regr):
        """Invert encode()'s regression transform for ground-truth residuals.

        labels_regr: (num_anchors, 4), (x, y, w, h) residuals.
        Returns gt_bboxes of shape (num_anchors, 4), (x, y, w, h).
        """
        # gt_x = anchor_w*label_x + anchor_x:
        gt_x = self.anchor_bboxes[:, 2]*labels_regr[:, 0] + self.anchor_bboxes[:, 0] # (shape (num_anchors, ))
        gt_x = gt_x.view(-1, 1) # (shape (num_anchors, 1))
        # gt_y = anchor_h*label_y + anchor_y:
        gt_y = self.anchor_bboxes[:, 3]*labels_regr[:, 1] + self.anchor_bboxes[:, 1]
        gt_y = gt_y.view(-1, 1) # (shape (num_anchors, 1))
        # gt_w = exp(label_w)*anchor_w:
        gt_w = torch.exp(labels_regr[:, 2])*self.anchor_bboxes[:, 2]
        gt_w = gt_w.view(-1, 1) # (shape (num_anchors, 1))
        # gt_h = exp(label_h)*anchor_h:
        gt_h = torch.exp(labels_regr[:, 3])*self.anchor_bboxes[:, 3]
        gt_h = gt_h.view(-1, 1) # (shape (num_anchors, 1))

        gt_bboxes = torch.cat([gt_x, gt_y, gt_w, gt_h], 1) # (shape (num_anchors, 4), (x, y, w, h))
        return gt_bboxes
# bbox_encoder = BboxEncoder()
# bbox_encoder.encode(torch.Tensor([[600, 800, 300, 400], [640, 810, 200, 300]]), torch.Tensor([1, 2]))
# bbox_encoder.decode(torch.ones((bbox_encoder.num_anchors, 4)), torch.ones((bbox_encoder.num_anchors, 4)))
class DatasetAugmentation(torch.utils.data.Dataset):
    """KITTI 2D detection dataset with random horizontal-flip augmentation.

    __getitem__ returns (img, label_regr, label_class), where the labels are
    the per-anchor regression / classification targets produced by
    BboxEncoder.encode().
    """

    def __init__(self, kitti_data_path, kitti_meta_path, type):
        # (`type` selects the img-id split file, e.g. "train"/"val". The name
        #  shadows the builtin, but is kept since callers pass it by keyword.)
        self.img_dir = kitti_data_path + "/object/training/image_2/"
        self.label_dir = kitti_data_path + "/object/training/label_2/"
        self.calib_dir = kitti_data_path + "/object/training/calib/"

        with open(kitti_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            img_ids = pickle.load(file)

        self.img_height = 375
        self.img_width = 1242

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)

        self.num_classes = 4 # (car, pedestrian, cyclist, background)

        # pre-parse all labels once; each example caches its img id, the
        # (num_gt_objects, 4) bbox array and the (num_gt_objects, ) class array:
        self.examples = []
        for img_id in img_ids:
            example = {}
            example["img_id"] = img_id

            labels = LabelLoader2D3D(img_id, self.label_dir, ".txt", self.calib_dir, ".txt")

            bboxes = np.zeros((len(labels), 4), dtype=np.float32)
            class_labels = np.zeros((len(labels), ), dtype=np.float32)
            counter = 0
            for label in labels:
                label_2d = label["label_2D"]
                if label_2d["class"] in ["Car", "Pedestrian", "Cyclist"]:
                    bbox = label_2d["poly"]
                    u_min = bbox[0, 0] # (left)
                    u_max = bbox[1, 0] # (right)
                    v_min = bbox[0, 1] # (top)
                    v_max = bbox[2, 1] # (bottom)
                    bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
                    class_labels[counter] = class_string_to_label[label_2d["class"]]
                    counter += 1
            # drop the unused tail rows (labels of other classes):
            bboxes = bboxes[0:counter]
            class_labels = class_labels[0:counter]

            example["bboxes"] = bboxes
            example["class_labels"] = class_labels
            self.examples.append(example)

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        """Load, augment and normalize one example; return encoded targets."""
        example = self.examples[index]
        img_id = example["img_id"]

        img_path = self.img_dir + img_id + ".png"
        img = cv2.imread(img_path, -1)
        img = cv2.resize(img, (self.img_width, self.img_height)) # (shape: (img_height, img_width, 3))

        # BUGFIX: copy the cached bboxes before augmenting. The original code
        # flipped example["bboxes"] IN PLACE, so a second flip of the same
        # example in a later epoch restored the unflipped coordinates while
        # the freshly loaded image was flipped, silently corrupting labels.
        gt_bboxes = np.copy(example["bboxes"]) # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))

        # flip the img and the labels with 0.5 probability:
        flip = np.random.randint(low=0, high=2)
        if flip == 1:
            img = cv2.flip(img, 1)

            img_w = self.img_width
            gt_bboxes[:, 0:2] = img_w - gt_bboxes[:, 0:2]
            # mirroring swaps which column holds the min/max x coordinate:
            temp = np.copy(gt_bboxes[:, 0])
            gt_bboxes[:, 0] = gt_bboxes[:, 1]
            gt_bboxes[:, 1] = temp

        # normalize the img (values match the standard ImageNet mean/std):
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        # get ground truth:
        gt_bboxes = torch.from_numpy(gt_bboxes) # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = torch.from_numpy(example["class_labels"]) # (shape: (num_gt_objects, ))
        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes, gt_classes)
        # (label_regr: (num_anchors, 4) (x, y, w, h) residuals)
        # (label_class: (num_anchors, ))

        # convert numpy -> torch:
        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))

        return (img, label_regr, label_class)

    def __len__(self):
        return self.num_examples
# test = DatasetAugmentation("/home/fregu856/exjobb/data/kitti", "/home/fregu856/exjobb/data/kitti/meta", type="train")
# for i in range(10):
# _ = test.__getitem__(i)
class DatasetMoreAugmentation2(torch.utils.data.Dataset):
    """KITTI object-detection training dataset with heavy data augmentation.

    Each item is (img, label_regr, label_class):
      - img: float32 tensor of shape (3, img_height, img_width), normalized
        with ImageNet mean/std.
      - label_regr: tensor of shape (num_anchors, 4)
        (x_resid, y_resid, w_resid, h_resid).
      - label_class: tensor of shape (num_anchors, ).
    """

    def __init__(self, kitti_data_path, kitti_meta_path, type):
        self.img_dir = kitti_data_path + "/object/training/image_2/"
        self.label_dir = kitti_data_path + "/object/training/label_2/"
        self.calib_dir = kitti_data_path + "/object/training/calib/"
        # img ids for this split, stored as a pickled list:
        with open(kitti_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            img_ids = pickle.load(file)

        self.img_height = 375
        self.img_width = 1242

        # augmentation transforms applied in __getitem__:
        self.random_horizontal_flip = RandomHorizontalFlip(p=0.5)
        self.random_hsv = RandomHSV(hue=10, saturation=20, brightness=20)
        self.random_scale = RandomScale(scale=0.3)
        self.random_translate = RandomTranslate(translate=0.2)

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)

        self.num_classes = 4 # (car, pedestrian, cyclist, background)

        self.examples = []
        for img_id in img_ids:
            example = {}
            example["img_id"] = img_id

            labels = LabelLoader2D3D(img_id, self.label_dir, ".txt", self.calib_dir, ".txt")

            # pre-allocate for all labels, fill only the classes of interest:
            bboxes = np.zeros((len(labels), 4), dtype=np.float32)
            class_labels = np.zeros((len(labels), ), dtype=np.float32)
            counter = 0
            for label in labels:
                label_2d = label["label_2D"]
                if label_2d["class"] in ["Car", "Pedestrian", "Cyclist"]:
                    bbox = label_2d["poly"]
                    u_min = bbox[0, 0] # (left)
                    u_max = bbox[1, 0] # (right)
                    v_min = bbox[0, 1] # (top)
                    v_max = bbox[2, 1] # (bottom)
                    bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
                    class_labels[counter] = class_string_to_label[label_2d["class"]]
                    counter += 1

            # trim to the rows that were actually filled:
            example["bboxes"] = bboxes[0:counter]
            example["class_labels"] = class_labels[0:counter]
            self.examples.append(example)

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        img_id = example["img_id"]

        img_path = self.img_dir + img_id + ".png"
        img = cv2.imread(img_path, -1)
        img = cv2.resize(img, (self.img_width, self.img_height)) # (shape: (img_height, img_width, 3))

        gt_bboxes_xxyy = example["bboxes"] # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = example["class_labels"] # (shape: (num_gt_objects, ))

        # pack boxes and class labels into one array so the augmentation ops
        # can transform them jointly:
        gt_bboxes_xxyyc = np.zeros((gt_bboxes_xxyy.shape[0], 5), dtype=gt_bboxes_xxyy.dtype)
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyy
        gt_bboxes_xxyyc[:, 4] = gt_classes

        ########################################################################
        # data augmentation:
        ########################################################################
        # flip the img and the labels with 0.5 probability:
        img, gt_bboxes_xyxyc = self.random_horizontal_flip(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        # randomly modify the hue, saturation and brightness of the image:
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img_hsv, gt_bboxes_xyxyc = self.random_hsv(img_hsv, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        # scale the image and the labels with a factor drawn from Uniform[1-scale, 1+scale]:
        img, gt_bboxes_xyxyc = self.random_scale(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        # randomly translate the image and the labels:
        img, gt_bboxes_xyxyc = self.random_translate(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        ########################################################################
        # normalize the img (ImageNet mean/std):
        ########################################################################
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        ########################################################################
        # get ground truth:
        ########################################################################
        gt_bboxes_xxyy = torch.from_numpy(gt_bboxes_xxyyc[:, 0:4]) # (shape: (num_gt_objects, 4))
        gt_classes = torch.from_numpy(gt_bboxes_xxyyc[:, 4]) # (shape: (num_gt_objects, ))

        if gt_bboxes_xxyy.size(0) == 0: # (if 0 gt objects)
            # fall back to another example; wrap around so the LAST index does
            # not step out of range (plain index+1 raised IndexError there):
            return self.__getitem__((index + 1) % self.num_examples)

        if gt_bboxes_xxyy.size() == torch.Size([4]): # (if 1 gt object)
            gt_bboxes_xxyy = gt_bboxes_xxyy.unsqueeze(0)
            gt_classes = torch.from_numpy(np.array([gt_classes.data]))

        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes_xxyy, gt_classes)
        # (label_regr is a Tensor of shape: (num_anchors, 4)) (x_resid, y_resid, w_resid, h_resid)
        # (label_class is a Tensor of shape: (num_anchors, ))

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))

        return (img, label_regr, label_class)

    def __len__(self):
        return self.num_examples
class DatasetEval(torch.utils.data.Dataset):
    """KITTI object-detection evaluation dataset (no augmentation).

    Each item is (img, label_regr, label_class, img_id): the normalized
    (3, img_height, img_width) image, the anchor-encoded regression and
    classification targets, and the image id string.
    """

    def __init__(self, kitti_data_path, kitti_meta_path, type):
        self.img_dir = kitti_data_path + "/object/training/image_2/"
        self.label_dir = kitti_data_path + "/object/training/label_2/"
        self.calib_dir = kitti_data_path + "/object/training/calib/"

        with open(kitti_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            img_ids = pickle.load(file)

        self.img_height = 375
        self.img_width = 1242

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)

        self.num_classes = 4 # (car, pedestrian, cyclist, background)

        self.examples = []
        for img_id in img_ids:
            labels = LabelLoader2D3D(img_id, self.label_dir, ".txt", self.calib_dir, ".txt")

            box_rows = []
            class_rows = []
            for label in labels:
                label_2d = label["label_2D"]
                if label_2d["class"] not in ["Car", "Pedestrian", "Cyclist"]:
                    continue
                poly = label_2d["poly"]
                # poly corners: [0] = top-left, [1] = top-right, [2] = bottom-right,
                # so the row is (x_min, x_max, y_min, y_max):
                box_rows.append([poly[0, 0], poly[1, 0], poly[0, 1], poly[2, 1]])
                class_rows.append(class_string_to_label[label_2d["class"]])

            self.examples.append({
                "img_id": img_id,
                "bboxes": np.array(box_rows, dtype=np.float32).reshape(-1, 4),
                "class_labels": np.array(class_rows, dtype=np.float32),
            })

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        img_id = example["img_id"]

        img = cv2.imread(self.img_dir + img_id + ".png", -1)
        img = cv2.resize(img, (self.img_width, self.img_height)) # (img_height, img_width, 3)

        gt_bboxes = example["bboxes"] # (num_gt_objects, 4), (x_min, x_max, y_min, y_max)

        # normalize with ImageNet mean/std and convert to CHW float32:
        img = img/255.0
        img = (img - np.array([0.485, 0.456, 0.406]))/np.array([0.229, 0.224, 0.225])
        img = np.transpose(img, (2, 0, 1)).astype(np.float32) # (3, img_height, img_width)

        # encode the ground truth onto the anchor grid:
        label_regr, label_class = self.bbox_encoder.encode(
            torch.from_numpy(gt_bboxes),
            torch.from_numpy(example["class_labels"]))
        # label_regr: (num_anchors, 4) (x_resid, y_resid, w_resid, h_resid)
        # label_class: (num_anchors, )

        return (torch.from_numpy(img), label_regr, label_class, img_id)

    def __len__(self):
        return self.num_examples
class DatasetEvalSeq(torch.utils.data.Dataset):
    """KITTI tracking sequence for evaluation (no augmentation).

    Each item is (img, label_regr, label_class, img_id). Frames without any
    ground-truth object of interest fall back to the previous frame.
    """

    def __init__(self, kitti_data_path, kitti_meta_path, sequence):
        self.img_dir = kitti_data_path + "/tracking/training/image_02/" + sequence + "/"
        self.label_path = kitti_data_path + "/tracking/training/label_02/" + sequence + ".txt"
        # NOTE! NOTE! the data format for the calib files was sliightly different for tracking, so I manually modifed the 20 files and saved them in the kitti_meta folder
        self.calib_path = kitti_meta_path + "/tracking/training/calib/" + sequence + ".txt"

        self.img_height = 375
        self.img_width = 1242

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)

        self.num_classes = 4 # (car, pedestrian, cyclist, background)

        img_ids = [name.split(".png")[0] for name in os.listdir(self.img_dir)]

        self.examples = []
        for img_id in img_ids:
            # frame number as a float (ids are zero-padded, e.g. "000042"):
            stripped = img_id.lstrip('0')
            img_id_float = float(stripped) if stripped != '' else 0.0

            labels = LabelLoader2D3D_sequence(img_id, img_id_float, self.label_path, self.calib_path)

            box_rows = []
            class_rows = []
            for label in labels:
                label_2d = label["label_2D"]
                if label_2d["class"] not in ["Car", "Pedestrian", "Cyclist"]:
                    continue
                poly = label_2d["poly"]
                # poly corners: [0] = top-left, [1] = top-right, [2] = bottom-right,
                # so the row is (x_min, x_max, y_min, y_max):
                box_rows.append([poly[0, 0], poly[1, 0], poly[0, 1], poly[2, 1]])
                class_rows.append(class_string_to_label[label_2d["class"]])

            self.examples.append({
                "img_id": img_id,
                "bboxes": np.array(box_rows, dtype=np.float32).reshape(-1, 4),
                "class_labels": np.array(class_rows, dtype=np.float32),
            })

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        img_id = example["img_id"]

        img = cv2.imread(self.img_dir + img_id + ".png", -1)
        img = cv2.resize(img, (self.img_width, self.img_height)) # (img_height, img_width, 3)

        gt_bboxes = example["bboxes"] # (num_gt_objects, 4), (x_min, x_max, y_min, y_max)
        if gt_bboxes.shape[0] == 0:
            # no gt objects in this frame; use the previous frame instead
            # (index 0 wraps to the last frame via negative indexing):
            return self.__getitem__(index-1)

        # normalize with ImageNet mean/std and convert to CHW float32:
        img = img/255.0
        img = (img - np.array([0.485, 0.456, 0.406]))/np.array([0.229, 0.224, 0.225])
        img = np.transpose(img, (2, 0, 1)).astype(np.float32) # (3, img_height, img_width)

        # encode the ground truth onto the anchor grid:
        label_regr, label_class = self.bbox_encoder.encode(
            torch.from_numpy(gt_bboxes),
            torch.from_numpy(example["class_labels"]))
        # label_regr: (num_anchors, 4) (x_resid, y_resid, w_resid, h_resid)
        # label_class: (num_anchors, )

        return (torch.from_numpy(img), label_regr, label_class, img_id)

    def __len__(self):
        return self.num_examples
class DatasetTest(torch.utils.data.Dataset):
    """KITTI test-split images (no labels). Each item is (img, img_id)."""

    def __init__(self, kitti_data_path, kitti_meta_path):
        self.img_dir = kitti_data_path + "/object/testing/image_2/"

        self.img_height = 375
        self.img_width = 1242

        # one example per .png in the image dir, keyed by file stem:
        self.examples = [name.split(".png")[0] for name in os.listdir(self.img_dir)]
        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        img_id = self.examples[index]

        img = cv2.imread(self.img_dir + img_id + ".png", -1)
        img = cv2.resize(img, (self.img_width, self.img_height)) # (img_height, img_width, 3)

        # normalize with ImageNet mean/std and convert to CHW float32:
        img = img/255.0
        img = (img - np.array([0.485, 0.456, 0.406]))/np.array([0.229, 0.224, 0.225])
        img = np.transpose(img, (2, 0, 1)).astype(np.float32) # (3, img_height, img_width)

        return (torch.from_numpy(img), img_id)

    def __len__(self):
        return self.num_examples
class DatasetTestSeq(torch.utils.data.Dataset):
    """KITTI tracking test-sequence images (no labels). Each item is (img, img_id)."""

    def __init__(self, kitti_data_path, kitti_meta_path, sequence):
        self.img_dir = kitti_data_path + "/tracking/testing/image_02/" + sequence + "/"

        self.img_height = 375
        self.img_width = 1242

        # one example per .png in the sequence dir, keyed by file stem:
        self.examples = [name.split(".png")[0] for name in os.listdir(self.img_dir)]
        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        img_id = self.examples[index]

        img = cv2.imread(self.img_dir + img_id + ".png", -1)
        img = cv2.resize(img, (self.img_width, self.img_height)) # (img_height, img_width, 3)

        # normalize with ImageNet mean/std and convert to CHW float32:
        img = img/255.0
        img = (img - np.array([0.485, 0.456, 0.406]))/np.array([0.229, 0.224, 0.225])
        img = np.transpose(img, (2, 0, 1)).astype(np.float32) # (3, img_height, img_width)

        return (torch.from_numpy(img), img_id)

    def __len__(self):
        return self.num_examples
class DatasetThnSeq(torch.utils.data.Dataset):
    """Thn image sequence: resized to KITTI width, then cropped to the bottom
    img_height rows. Each item is (img, img_id)."""

    def __init__(self, thn_data_path):
        self.img_dir = thn_data_path + "/"

        self.orig_img_height = 512
        self.orig_img_width = 1024

        self.img_height = 375
        self.img_width = 1242

        # one example per .png in the image dir, keyed by file stem:
        self.examples = [name.split(".png")[0] for name in os.listdir(self.img_dir)]
        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        img_id = self.examples[index]

        img = cv2.imread(self.img_dir + img_id + ".png", -1)

        # hoisted: the resized height was previously computed three times inline.
        # NOTE(review): the width scale factor is applied to self.img_height
        # (375), not self.orig_img_height (512) — preserved as-is, but confirm
        # this is the intended crop geometry.
        resized_height = int((self.img_width/self.orig_img_width)*self.img_height)
        img = cv2.resize(img, (self.img_width, resized_height))
        # keep only the bottom img_height rows:
        img = img[(resized_height - self.img_height):resized_height]

        ########################################################################
        # normalize the img (ImageNet mean/std):
        ########################################################################
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))

        return (img, img_id)

    def __len__(self):
        return self.num_examples
class DatasetThnSeqSynscapes(torch.utils.data.Dataset):
    """Thn image sequence resized to Synscapes resolution (1440x720).
    Each item is (img, img_id)."""

    def __init__(self, thn_data_path):
        self.img_dir = thn_data_path + "/"

        self.orig_img_height = 512
        self.orig_img_width = 1024

        self.img_height = 720
        self.img_width = 1440

        # one example per .png in the image dir, keyed by file stem:
        self.examples = [name.split(".png")[0] for name in os.listdir(self.img_dir)]
        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        img_id = self.examples[index]

        img = cv2.imread(self.img_dir + img_id + ".png", -1)
        img = cv2.resize(img, (self.img_width, self.img_height))

        # normalize with ImageNet mean/std and convert to CHW float32:
        img = img/255.0
        img = (img - np.array([0.485, 0.456, 0.406]))/np.array([0.229, 0.224, 0.225])
        img = np.transpose(img, (2, 0, 1)).astype(np.float32) # (3, img_height, img_width)

        return (torch.from_numpy(img), img_id)

    def __len__(self):
        return self.num_examples
from synscapesloader import LabelLoader2D3D_Synscapes
# Maps Synscapes class strings to the integer labels used by the model
# (same label space as KITTI: 1 = car, 2 = pedestrian, 3 = cyclist):
class_string_to_label_synscapes = {"car": 1,
                                   "person": 2,
                                   "bicyclist": 3} # (background: 0)
class DatasetSynscapesAugmentation(torch.utils.data.Dataset):
    """Synscapes object-detection training dataset with data augmentation.

    Images are resized to KITTI width (keeping aspect ratio), randomly
    cropped vertically to img_height rows, then flipped / HSV-jittered /
    scaled / translated. Each item is (img, label_regr, label_class):
      - img: float32 tensor of shape (3, img_height, img_width), normalized
        with ImageNet mean/std.
      - label_regr: tensor of shape (num_anchors, 4)
        (x_resid, y_resid, w_resid, h_resid).
      - label_class: tensor of shape (num_anchors, ).
    """

    def __init__(self, synscapes_path, synscapes_meta_path, type):
        self.img_dir = synscapes_path + "/img/rgb/"
        self.meta_dir = synscapes_path + "/meta/"

        with open(synscapes_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            img_ids = pickle.load(file)

        self.orig_img_height = 720
        self.orig_img_width = 1440

        self.img_height = 375
        self.img_width = 1242

        # augmentation transforms applied in __getitem__:
        self.random_horizontal_flip = RandomHorizontalFlip(p=0.5)
        self.random_hsv = RandomHSV(hue=10, saturation=20, brightness=20)
        self.random_scale = RandomScale(scale=0.3)
        self.random_translate = RandomTranslate(translate=0.2)

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)

        self.num_classes = 4 # (background, car, pedestrian, cyclist)

        self.examples = []
        for img_id in img_ids:
            example = {}
            example["img_id"] = img_id

            labels = LabelLoader2D3D_Synscapes(meta_dir=self.meta_dir, file_id=img_id)

            # pre-allocate for all labels, fill only the classes of interest:
            bboxes = np.zeros((len(labels), 4), dtype=np.float32)
            class_labels = np.zeros((len(labels), ), dtype=np.float32)
            counter = 0
            for label in labels:
                label_2d = label["label_2D"]
                # keep mostly-visible objects of the three classes of interest:
                if label_2d["class"] in ["car", "person", "bicyclist"] and label_2d["occluded"] < 0.7 and label_2d["truncated"] < 0.7:
                    bbox = label_2d["poly"]
                    u_min = bbox[0, 0] # (left)
                    u_max = bbox[1, 0] # (right)
                    v_min = bbox[0, 1] # (top)
                    v_max = bbox[2, 1] # (bottom)
                    bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
                    class_labels[counter] = class_string_to_label_synscapes[label_2d["class"]]
                    counter += 1

            # trim to the rows that were actually filled:
            example["bboxes"] = bboxes[0:counter]
            example["class_labels"] = class_labels[0:counter]
            self.examples.append(example)

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        img_id = example["img_id"]

        img = cv2.imread(self.img_dir + img_id + ".png", -1) # (shape: (orig_img_height, orig_img_width, 3))

        gt_bboxes_xxyy = example["bboxes"] # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = example["class_labels"] # (shape: (num_gt_objects, ))

        # pack boxes and class labels into one array so the resize/crop and
        # augmentation ops can transform them jointly:
        gt_bboxes_xxyyc = np.zeros((gt_bboxes_xxyy.shape[0], 5), dtype=gt_bboxes_xxyy.dtype)
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyy
        gt_bboxes_xxyyc[:, 4] = gt_classes

        # resize to KITTI width, keeping the aspect ratio:
        scale = float(float(self.img_width)/float(self.orig_img_width))
        img = cv2.resize(img, (self.img_width, int(scale*self.orig_img_height))) # (shape: (621, img_width, 3))
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyyc[:, 0:4]*scale

        # random vertical crop down to img_height rows:
        start = int(np.random.uniform(low=0, high=(img.shape[0] - self.img_height)))
        img = img[start:(start + self.img_height)] # (shape: (img_height, img_width, 3))
        gt_bboxes_xxyyc[:, 2:4] = gt_bboxes_xxyyc[:, 2:4] - start

        ########################################################################
        # data augmentation:
        ########################################################################
        # flip the img and the labels with 0.5 probability:
        img, gt_bboxes_xyxyc = self.random_horizontal_flip(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        # randomly modify the hue, saturation and brightness of the image:
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img_hsv, gt_bboxes_xyxyc = self.random_hsv(img_hsv, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        # scale the image and the labels with a factor drawn from Uniform[1-scale, 1+scale]:
        img, gt_bboxes_xyxyc = self.random_scale(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        # randomly translate the image and the labels:
        img, gt_bboxes_xyxyc = self.random_translate(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        ########################################################################
        # normalize the img (ImageNet mean/std):
        ########################################################################
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        ########################################################################
        # get ground truth:
        ########################################################################
        gt_bboxes_xxyy = torch.from_numpy(gt_bboxes_xxyyc[:, 0:4]) # (shape: (num_gt_objects, 4))
        gt_classes = torch.from_numpy(gt_bboxes_xxyyc[:, 4]) # (shape: (num_gt_objects, ))

        if gt_bboxes_xxyy.size(0) == 0: # (if 0 gt objects)
            # fall back to another example; wrap around so the LAST index does
            # not step out of range (plain index+1 raised IndexError there):
            return self.__getitem__((index + 1) % self.num_examples)

        if gt_bboxes_xxyy.size() == torch.Size([4]): # (if 1 gt object)
            gt_bboxes_xxyy = gt_bboxes_xxyy.unsqueeze(0)
            gt_classes = torch.from_numpy(np.array([gt_classes.data]))

        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes_xxyy, gt_classes)
        # (label_regr is a Tensor of shape: (num_anchors, 4)) (x_resid, y_resid, w_resid, h_resid)
        # (label_class is a Tensor of shape: (num_anchors, ))

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))

        return (img, label_regr, label_class)

    def __len__(self):
        return self.num_examples
# test = DatasetSynscapesAugmentation("/home/fregu856/data/synscapes", "", "")
# for i in range(10):
# _ = test.__getitem__(i)
class DatasetKITTISynscapesAugmentation(torch.utils.data.Dataset):
    """Mixed KITTI + Synscapes detection dataset with random data augmentation.

    Loads as many Synscapes (synthetic) images as there are KITTI (real)
    images, extracts 2D ground-truth bboxes for the car/pedestrian/cyclist
    classes, and in __getitem__ applies random horizontal flip, HSV jitter,
    scaling and translation before normalizing and encoding the targets.

    __getitem__ returns (img, label_regr, label_class):
      img:         Tensor of shape (3, img_height, img_width)
      label_regr:  Tensor of shape (num_anchors, 4), (x_resid, y_resid, w_resid, h_resid)
      label_class: Tensor of shape (num_anchors, )
    """

    def __init__(self, synscapes_path, synscapes_meta_path, kitti_data_path, kitti_meta_path, type):
        self.synscapes_img_dir = synscapes_path + "/img/rgb/"
        self.synscapes_meta_dir = synscapes_path + "/meta/"
        with open(synscapes_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            synscapes_img_ids = pickle.load(file)

        self.kitti_img_dir = kitti_data_path + "/object/training/image_2/"
        self.kitti_label_dir = kitti_data_path + "/object/training/label_2/"
        self.kitti_calib_dir = kitti_data_path + "/object/training/calib/"
        self.kitti_lidar_dir = kitti_data_path + "/object/training/velodyne/"
        with open(kitti_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            kitti_img_ids = pickle.load(file)

        # balance the mix: use as many synscapes imgs as there are kitti imgs
        num_kitti_imgs = len(kitti_img_ids)
        synscapes_img_ids = synscapes_img_ids[0:num_kitti_imgs]

        self.synscapes_img_height = 720
        self.synscapes_img_width = 1440
        self.img_height = 375
        self.img_width = 1242

        self.random_horizontal_flip = RandomHorizontalFlip(p=0.5)
        self.random_hsv = RandomHSV(hue=10, saturation=20, brightness=20)
        self.random_scale = RandomScale(scale=0.3)
        self.random_translate = RandomTranslate(translate=0.2)

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)
        self.num_classes = 4 # (background, car, pedestrian, cyclist)

        self.examples = []
        for img_id in synscapes_img_ids:
            labels = LabelLoader2D3D_Synscapes(meta_dir=self.synscapes_meta_dir, file_id=img_id)
            bboxes, class_labels = self._collect_gt(labels,
                                                    ("car", "person", "bicyclist"),
                                                    class_string_to_label_synscapes,
                                                    filter_occluded=True)
            example = {}
            example["dataset"] = "synscapes"
            example["img_id"] = img_id
            example["bboxes"] = bboxes
            example["class_labels"] = class_labels
            self.examples.append(example)
        for img_id in kitti_img_ids:
            labels = LabelLoader2D3D(img_id, self.kitti_label_dir, ".txt", self.kitti_calib_dir, ".txt")
            bboxes, class_labels = self._collect_gt(labels,
                                                    ("Car", "Pedestrian", "Cyclist"),
                                                    class_string_to_label,
                                                    filter_occluded=False)
            example = {}
            example["dataset"] = "kitti"
            example["img_id"] = img_id
            example["bboxes"] = bboxes
            example["class_labels"] = class_labels
            self.examples.append(example)

        self.num_examples = len(self.examples)

    @staticmethod
    def _collect_gt(labels, class_names, class_to_label, filter_occluded):
        # Collect (x_min, x_max, y_min, y_max) bboxes and class labels for the
        # wanted classes; optionally drop heavily occluded/truncated objects
        # (Synscapes labels carry occluded/truncated fractions, KITTI's are
        # not filtered here).
        bboxes = np.zeros((len(labels), 4), dtype=np.float32)
        class_labels = np.zeros((len(labels), ), dtype=np.float32)
        counter = 0
        for label in labels:
            label_2d = label["label_2D"]
            if label_2d["class"] not in class_names:
                continue
            if filter_occluded and not (label_2d["occluded"] < 0.7 and label_2d["truncated"] < 0.7):
                continue
            bbox = label_2d["poly"]
            u_min = bbox[0, 0] # (left)
            u_max = bbox[1, 0] # (right)
            v_min = bbox[0, 1] # (top)
            v_max = bbox[2, 1] # (bottom)
            bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
            class_labels[counter] = class_to_label[label_2d["class"]]
            counter += 1
        return bboxes[0:counter], class_labels[0:counter]

    def __getitem__(self, index):
        example = self.examples[index]

        gt_bboxes_xxyy = example["bboxes"] # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = example["class_labels"] # (shape: (num_gt_objects, ))
        if gt_classes.shape[0] == 0:
            # no gt objects in this img, fall back to the first example:
            return self.__getitem__(0)
        gt_bboxes_xxyyc = np.zeros((gt_bboxes_xxyy.shape[0], 5), dtype=gt_bboxes_xxyy.dtype) # (x_min, x_max, y_min, y_max, class_label)
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyy
        gt_bboxes_xxyyc[:, 4] = gt_classes

        if example["dataset"] == "synscapes":
            img = cv2.imread(self.synscapes_img_dir + example["img_id"] + ".png", -1) # (shape: (720, 1440, 3))
            # resize to the KITTI width, then crop a random img_height-high strip:
            scale = float(float(self.img_width)/float(self.synscapes_img_width))
            img = cv2.resize(img, (self.img_width, int(scale*self.synscapes_img_height))) # (shape: (621, img_width, 3))
            gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyyc[:, 0:4]*scale
            start = int(np.random.uniform(low=0, high=(img.shape[0] - self.img_height)))
            img = img[start:(start + self.img_height)] # (shape: (img_height, img_width, 3))
            gt_bboxes_xxyyc[:, 2:4] = gt_bboxes_xxyyc[:, 2:4] - start
        elif example["dataset"] == "kitti":
            img = cv2.imread(self.kitti_img_dir + example["img_id"] + ".png", -1)
            img = cv2.resize(img, (self.img_width, self.img_height)) # (shape: (img_height, img_width, 3))

        ########################################################################
        # data augmentation:
        ########################################################################
        # flip the img and the labels with 0.5 probability:
        img, gt_bboxes_xyxyc = self.random_horizontal_flip(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)
        # randomly modify the hue, saturation and brightness of the image:
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img_hsv, gt_bboxes_xyxyc = self.random_hsv(img_hsv, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)
        # scale the image and the labels with a factor drawn from Uniform[1-scale, 1+scale]:
        img, gt_bboxes_xyxyc = self.random_scale(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)
        # randomly translate the image and the labels:
        img, gt_bboxes_xyxyc = self.random_translate(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        ########################################################################
        # normalize the img (ImageNet mean/std), then HWC -> CHW:
        ########################################################################
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        ########################################################################
        # encode ground truth:
        ########################################################################
        gt_bboxes_xxyy = torch.from_numpy(gt_bboxes_xxyyc[:, 0:4]) # (shape: (num_gt_objects, 4))
        gt_classes = torch.from_numpy(gt_bboxes_xxyyc[:, 4]) # (shape: (num_gt_objects, ))
        if gt_bboxes_xxyy.size(0) == 0: # (if augmentation removed all gt objects)
            # BUGFIX: wrap around with modulo -- the previous "index + 1"
            # raised IndexError when the last example had no gt objects left:
            return self.__getitem__((index + 1) % self.num_examples)
        if gt_bboxes_xxyy.size() == torch.Size([4]): # (defensive: single flattened gt object)
            gt_bboxes_xxyy = gt_bboxes_xxyy.unsqueeze(0)
            gt_classes = torch.from_numpy(np.array([gt_classes.data]))
        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes_xxyy, gt_classes)
        # (label_regr has shape: (num_anchors, 4)) (x_resid, y_resid, w_resid, h_resid)
        # (label_class has shape: (num_anchors, ))

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))
        return (img, label_regr, label_class)

    def __len__(self):
        # Total number of examples collected in __init__.
        return self.num_examples
class DatasetSynscapesEval(torch.utils.data.Dataset):
    """Synscapes evaluation dataset (deterministic bottom crop, no augmentation).

    Images are resized to the KITTI width (1242) and the bottom 375 rows are
    kept, matching the training-time geometry.

    __getitem__ returns (img, label_regr, label_class, img_id):
      img:         Tensor of shape (3, img_height, img_width)
      label_regr:  Tensor of shape (num_anchors, 4), (x_resid, y_resid, w_resid, h_resid)
      label_class: Tensor of shape (num_anchors, )
    """

    def __init__(self, synscapes_path, synscapes_meta_path, type):
        self.img_dir = synscapes_path + "/img/rgb/"
        self.meta_dir = synscapes_path + "/meta/"
        with open(synscapes_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            img_ids = pickle.load(file)

        self.orig_img_height = 720
        self.orig_img_width = 1440
        self.img_height = 375
        self.img_width = 1242

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)
        self.num_classes = 4 # (background, car, pedestrian, cyclist)

        self.examples = []
        for img_id in img_ids:
            labels = LabelLoader2D3D_Synscapes(meta_dir=self.meta_dir, file_id=img_id)
            bboxes = np.zeros((len(labels), 4), dtype=np.float32)
            class_labels = np.zeros((len(labels), ), dtype=np.float32)
            counter = 0
            for label in labels:
                label_2d = label["label_2D"]
                # keep car/pedestrian/cyclist objects that are not heavily occluded/truncated:
                if label_2d["class"] in ["car", "person", "bicyclist"] and label_2d["occluded"] < 0.7 and label_2d["truncated"] < 0.7:
                    bbox = label_2d["poly"]
                    u_min = bbox[0, 0] # (left)
                    u_max = bbox[1, 0] # (right)
                    v_min = bbox[0, 1] # (top)
                    v_max = bbox[2, 1] # (bottom)
                    bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
                    class_labels[counter] = class_string_to_label_synscapes[label_2d["class"]]
                    counter += 1
            example = {}
            example["img_id"] = img_id
            example["bboxes"] = bboxes[0:counter]
            example["class_labels"] = class_labels[0:counter]
            self.examples.append(example)

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        img_id = example["img_id"]
        img = cv2.imread(self.img_dir + img_id + ".png", -1) # (shape: (720, 1440, 3))

        gt_bboxes_xxyy = example["bboxes"] # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = example["class_labels"] # (shape: (num_gt_objects, ))
        gt_bboxes_xxyyc = np.zeros((gt_bboxes_xxyy.shape[0], 5), dtype=gt_bboxes_xxyy.dtype) # (x_min, x_max, y_min, y_max, class_label)
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyy
        gt_bboxes_xxyyc[:, 4] = gt_classes

        # resize to the KITTI width and deterministically keep the bottom img_height rows:
        scale = float(float(self.img_width)/float(self.orig_img_width))
        img = cv2.resize(img, (self.img_width, int(scale*self.orig_img_height))) # (shape: (621, img_width, 3))
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyyc[:, 0:4]*scale
        start = img.shape[0] - self.img_height
        img = img[start:(start + self.img_height)] # (shape: (img_height, img_width, 3))
        gt_bboxes_xxyyc[:, 2:4] = gt_bboxes_xxyyc[:, 2:4] - start

        # normalize the img (ImageNet mean/std), then HWC -> CHW:
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        # encode ground truth:
        gt_bboxes_xxyy = torch.from_numpy(gt_bboxes_xxyyc[:, 0:4]) # (shape: (num_gt_objects, 4))
        gt_classes = torch.from_numpy(gt_bboxes_xxyyc[:, 4]) # (shape: (num_gt_objects, ))
        if gt_bboxes_xxyy.size(0) == 0: # (if 0 gt objects)
            # BUGFIX: wrap around with modulo -- the previous "index + 1"
            # raised IndexError on the last example:
            return self.__getitem__((index + 1) % self.num_examples)
        if gt_bboxes_xxyy.size() == torch.Size([4]): # (defensive: single flattened gt object)
            gt_bboxes_xxyy = gt_bboxes_xxyy.unsqueeze(0)
            gt_classes = torch.from_numpy(np.array([gt_classes.data]))
        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes_xxyy, gt_classes)
        # (label_regr has shape: (num_anchors, 4)) (x_resid, y_resid, w_resid, h_resid)
        # (label_class has shape: (num_anchors, ))

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))
        return (img, label_regr, label_class, img_id)

    def __len__(self):
        # Total number of examples collected in __init__.
        return self.num_examples
class DatasetSynscapesEvalFullSize(torch.utils.data.Dataset):
    """Synscapes evaluation dataset at full 720 x 1440 resolution (no resize/crop).

    __getitem__ returns (img, label_regr, label_class, img_id):
      img:         Tensor of shape (3, img_height, img_width)
      label_regr:  Tensor of shape (num_anchors, 4), (x_resid, y_resid, w_resid, h_resid)
      label_class: Tensor of shape (num_anchors, )
    """

    def __init__(self, synscapes_path, synscapes_meta_path, type):
        self.img_dir = synscapes_path + "/img/rgb/"
        self.meta_dir = synscapes_path + "/meta/"
        with open(synscapes_meta_path + "/%s_img_ids.pkl" % type, "rb") as file: # (needed for python3)
            img_ids = pickle.load(file)

        self.img_height = 720
        self.img_width = 1440

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)
        self.num_classes = 4 # (background, car, pedestrian, cyclist)

        self.examples = []
        for img_id in img_ids:
            labels = LabelLoader2D3D_Synscapes(meta_dir=self.meta_dir, file_id=img_id)
            bboxes = np.zeros((len(labels), 4), dtype=np.float32)
            class_labels = np.zeros((len(labels), ), dtype=np.float32)
            counter = 0
            for label in labels:
                label_2d = label["label_2D"]
                # keep car/pedestrian/cyclist objects that are not heavily occluded/truncated:
                if label_2d["class"] in ["car", "person", "bicyclist"] and label_2d["occluded"] < 0.7 and label_2d["truncated"] < 0.7:
                    bbox = label_2d["poly"]
                    u_min = bbox[0, 0] # (left)
                    u_max = bbox[1, 0] # (right)
                    v_min = bbox[0, 1] # (top)
                    v_max = bbox[2, 1] # (bottom)
                    bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
                    class_labels[counter] = class_string_to_label_synscapes[label_2d["class"]]
                    counter += 1
            example = {}
            example["img_id"] = img_id
            example["bboxes"] = bboxes[0:counter]
            example["class_labels"] = class_labels[0:counter]
            self.examples.append(example)

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]
        img_id = example["img_id"]
        img = cv2.imread(self.img_dir + img_id + ".png", -1) # (shape: (img_height, img_width, 3))

        gt_bboxes_xxyy = example["bboxes"] # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = example["class_labels"] # (shape: (num_gt_objects, ))
        gt_bboxes_xxyyc = np.zeros((gt_bboxes_xxyy.shape[0], 5), dtype=gt_bboxes_xxyy.dtype) # (x_min, x_max, y_min, y_max, class_label)
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyy
        gt_bboxes_xxyyc[:, 4] = gt_classes

        # normalize the img (ImageNet mean/std), then HWC -> CHW:
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        # encode ground truth:
        gt_bboxes_xxyy = torch.from_numpy(gt_bboxes_xxyyc[:, 0:4]) # (shape: (num_gt_objects, 4))
        gt_classes = torch.from_numpy(gt_bboxes_xxyyc[:, 4]) # (shape: (num_gt_objects, ))
        if gt_bboxes_xxyy.size(0) == 0: # (if 0 gt objects)
            # BUGFIX: wrap around with modulo -- the previous "index + 1"
            # raised IndexError on the last example:
            return self.__getitem__((index + 1) % self.num_examples)
        if gt_bboxes_xxyy.size() == torch.Size([4]): # (defensive: single flattened gt object)
            gt_bboxes_xxyy = gt_bboxes_xxyy.unsqueeze(0)
            gt_classes = torch.from_numpy(np.array([gt_classes.data]))
        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes_xxyy, gt_classes)
        # (label_regr has shape: (num_anchors, 4)) (x_resid, y_resid, w_resid, h_resid)
        # (label_class has shape: (num_anchors, ))

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))
        return (img, label_regr, label_class, img_id)

    def __len__(self):
        # Total number of examples collected in __init__.
        return self.num_examples
def ProjectTo2Dbbox(bbox_3D, cam_intrinsic, R):
    """Project a 3D bounding box into the image and return its 2D axis-aligned bbox.

    bbox_3D: array of shape (6, ), (x, y, z, h, w, l) in camera coordinates.
    cam_intrinsic: camera intrinsic matrix, array of shape (3, 3).
    R: rotation matrix of the box, array of shape (3, 3).

    Returns [left, top, right, bottom] in (truncated) integer pixel coordinates.
    """
    x = bbox_3D[0]
    y = bbox_3D[1]
    z = bbox_3D[2]
    h = bbox_3D[3]
    w = bbox_3D[4]
    l = bbox_3D[5]

    # the 8 corners of the box, centered at the origin
    # (convention: x points forward, y to the left, z up):
    sign_x = np.array([1, 1, 1, 1, -1, -1, -1, -1])
    sign_y = np.array([1, -1, -1, 1, 1, -1, -1, 1])
    sign_z = np.array([1, 1, -1, -1, 1, 1, -1, -1])
    corners = np.vstack(((l/2.0)*sign_x, (w/2.0)*sign_y, (h/2.0)*sign_z)) # (shape: (3, 8))

    # rotate, then translate to the box center:
    corners = np.dot(R, corners)
    corners = corners + np.array([[x], [y], [z]])

    # apply the (3, 3) intrinsic and perform the homogeneous divide
    # (equivalent to padding to a 4x4 view matrix and dropping the last row):
    projected = np.dot(cam_intrinsic, corners) # (shape: (3, 8))
    projected = projected/projected[2, :]

    left = int(np.min(projected[0, :]))
    top = int(np.min(projected[1, :]))
    right = int(np.max(projected[0, :]))
    bottom = int(np.max(projected[1, :]))

    return [left, top, right, bottom]
def wrapToPi(a):
    """Wrap an angle (or array of angles) in radians into the interval [-pi, pi)."""
    two_pi = 2.0*np.pi
    shifted = np.mod(a + np.pi, two_pi)
    return shifted - np.pi
sys.path.append("/root/project1/nuscenes-devkit/python-sdk")
sys.path.append("/home/fregu856/project1/nuscenes-devkit/python-sdk")
from nuscenes_utils.nuscenes import NuScenes
from nuscenes_utils.data_classes import PointCloud as NuScenesPointCloud
from pyquaternion import Quaternion
# nuScenes category name -> detection class label used by this file
# (1 = car, 2 = pedestrian, 3 = cyclist; label 0 is reserved for background).
class_string_to_label_nuscenes = {"vehicle.car": 1,
                                  "human.pedestrian.adult": 2,
                                  "human.pedestrian.child": 2,
                                  "human.pedestrian.police_officer": 2,
                                  "human.pedestrian.construction_worker": 2,
                                  "vehicle.bicycle": 3} # (background: 0)
class DatasetKITTINuscenesAugmentation(torch.utils.data.Dataset):
    """Mixed KITTI + nuScenes detection dataset with random data augmentation.

    nuScenes 3D annotations are transformed into the front-camera frame and
    projected to 2D bboxes; KITTI 2D labels are used directly. __getitem__
    applies random horizontal flip, HSV jitter, scaling and translation.

    __getitem__ returns (img, label_regr, label_class):
      img:         Tensor of shape (3, img_height, img_width)
      label_regr:  Tensor of shape (num_anchors, 4), (x_resid, y_resid, w_resid, h_resid)
      label_class: Tensor of shape (num_anchors, )
    """

    def __init__(self, nuscenes_data_path, kitti_data_path, kitti_meta_path, kitti_type):
        self.nuscenes_data_path = nuscenes_data_path
        self.nusc = NuScenes(version="v0.1", dataroot=self.nuscenes_data_path, verbose=False)

        self.kitti_img_dir = kitti_data_path + "/object/training/image_2/"
        self.kitti_label_dir = kitti_data_path + "/object/training/label_2/"
        self.kitti_calib_dir = kitti_data_path + "/object/training/calib/"
        self.kitti_lidar_dir = kitti_data_path + "/object/training/velodyne/"
        with open(kitti_meta_path + "/%s_img_ids.pkl" % kitti_type, "rb") as file: # (needed for python3)
            kitti_img_ids = pickle.load(file)

        self.img_height = 375
        self.img_width = 1242
        self.nuscenes_img_height = 900
        self.nuscenes_img_width = 1600

        self.random_horizontal_flip = RandomHorizontalFlip(p=0.5)
        self.random_hsv = RandomHSV(hue=10, saturation=20, brightness=20)
        self.random_scale = RandomScale(scale=0.3)
        self.random_translate = RandomTranslate(translate=0.2)

        self.bbox_encoder = BboxEncoder(img_h=self.img_height, img_w=self.img_width)
        self.num_classes = 4 # (car, pedestrian, cyclist, background)

        self.examples = []
        for sample in self.nusc.sample:
            example = {}
            example["dataset"] = "nuscenes"
            example["sample_token"] = sample["token"]
            bboxes = []
            class_labels = []
            cam_front = self.nusc.get("sample_data", sample["data"]["CAM_FRONT"])
            pose_record = self.nusc.get('ego_pose', cam_front['ego_pose_token'])
            cs_record = self.nusc.get('calibrated_sensor', cam_front['calibrated_sensor_token'])
            cam_front_intrinsic = np.array(cs_record['camera_intrinsic']) # (shape: (3, 3))
            for annotation_token in sample["anns"]:
                annotation = self.nusc.get("sample_annotation", annotation_token)
                # keep only the categories we map to car/pedestrian/cyclist labels:
                if annotation["category_name"] not in class_string_to_label_nuscenes:
                    continue
                if int(annotation["visibility_token"]) <= 1: # (skip barely visible objects)
                    continue
                translation = annotation["translation"] # (X, Y, Z) (global frame)
                r_y_quat = Quaternion(annotation["rotation"])
                # transform from the global frame into the ego vehicle frame
                # for the timestamp of the front-camera image:
                translation = translation - np.array(pose_record['translation'])
                translation = np.dot(Quaternion(pose_record['rotation']).inverse.rotation_matrix, translation)
                r_y_quat = Quaternion(pose_record['rotation']).inverse*r_y_quat
                # transform into the front-camera frame (same as the point cloud is transformed into):
                translation = translation - np.array(cs_record['translation'])
                translation = np.dot(Quaternion(cs_record['rotation']).inverse.rotation_matrix, translation)
                r_y_quat = Quaternion(cs_record['rotation']).inverse*r_y_quat
                size = annotation["size"] # (w, l, h)
                bbox_3D = np.array([translation[0], translation[1], translation[2], size[2], size[0], size[1]]) # (x, y, z, h, w, l)
                if bbox_3D[2] > 2.0: # (remove all bboxes which are located behind the camera)
                    bbox_xyxy = ProjectTo2Dbbox(bbox_3D, cam_front_intrinsic, r_y_quat.rotation_matrix) # (x_min, y_min, x_max, y_max)
                    bboxes.append([bbox_xyxy[0], bbox_xyxy[2], bbox_xyxy[1], bbox_xyxy[3]]) # (x_min, x_max, y_min, y_max)
                    class_labels.append(class_string_to_label_nuscenes[annotation["category_name"]])
            example["bboxes"] = np.array(bboxes, dtype=np.float32)
            example["class_labels"] = np.array(class_labels, dtype=np.float32)
            self.examples.append(example)

        for img_id in kitti_img_ids:
            example = {}
            example["dataset"] = "kitti"
            example["img_id"] = img_id
            labels = LabelLoader2D3D(img_id, self.kitti_label_dir, ".txt", self.kitti_calib_dir, ".txt")
            bboxes = np.zeros((len(labels), 4), dtype=np.float32)
            class_labels = np.zeros((len(labels), ), dtype=np.float32)
            counter = 0
            for label in labels:
                label_2d = label["label_2D"]
                if label_2d["class"] in ["Car", "Pedestrian", "Cyclist"]:
                    bbox = label_2d["poly"]
                    u_min = bbox[0, 0] # (left)
                    u_max = bbox[1, 0] # (right)
                    v_min = bbox[0, 1] # (top)
                    v_max = bbox[2, 1] # (bottom)
                    bboxes[counter] = np.array([u_min, u_max, v_min, v_max])
                    class_labels[counter] = class_string_to_label[label_2d["class"]]
                    counter += 1
            example["bboxes"] = bboxes[0:counter]
            example["class_labels"] = class_labels[0:counter]
            self.examples.append(example)

        self.num_examples = len(self.examples)

    def __getitem__(self, index):
        example = self.examples[index]

        gt_bboxes_xxyy = example["bboxes"] # (shape: (num_gt_objects, 4), (x_min, x_max, y_min, y_max))
        gt_classes = example["class_labels"] # (shape: (num_gt_objects, ))
        if gt_classes.shape[0] == 0:
            # no gt objects in this img, fall back to the first example:
            return self.__getitem__(0)
        gt_bboxes_xxyyc = np.zeros((gt_bboxes_xxyy.shape[0], 5), dtype=gt_bboxes_xxyy.dtype) # (x_min, x_max, y_min, y_max, class_label)
        gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyy
        gt_bboxes_xxyyc[:, 4] = gt_classes

        if example["dataset"] == "kitti":
            img = cv2.imread(self.kitti_img_dir + example["img_id"] + ".png", -1)
            img = cv2.resize(img, (self.img_width, self.img_height)) # (shape: (img_height, img_width, 3))
        elif example["dataset"] == "nuscenes":
            sample = self.nusc.get("sample", example["sample_token"])
            cam_front_sample_data = self.nusc.get("sample_data", sample["data"]["CAM_FRONT"])
            img = cv2.imread(self.nuscenes_data_path + "/" + cam_front_sample_data["filename"], -1)
            # resize to the KITTI width, then crop a random img_height-high strip:
            scale = float(float(self.img_width)/float(self.nuscenes_img_width))
            img = cv2.resize(img, (self.img_width, int(scale*self.nuscenes_img_height))) # (shape: (698, img_width, 3))
            gt_bboxes_xxyyc[:, 0:4] = gt_bboxes_xxyyc[:, 0:4]*scale
            start = int(np.random.uniform(low=0, high=(img.shape[0] - self.img_height)))
            img = img[start:(start + self.img_height)] # (shape: (img_height, img_width, 3))
            gt_bboxes_xxyyc[:, 2:4] = gt_bboxes_xxyyc[:, 2:4] - start

        ########################################################################
        # data augmentation:
        ########################################################################
        # flip the img and the labels with 0.5 probability:
        img, gt_bboxes_xyxyc = self.random_horizontal_flip(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)
        # randomly modify the hue, saturation and brightness of the image:
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img_hsv, gt_bboxes_xyxyc = self.random_hsv(img_hsv, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)
        # scale the image and the labels with a factor drawn from Uniform[1-scale, 1+scale]:
        img, gt_bboxes_xyxyc = self.random_scale(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)
        # randomly translate the image and the labels:
        img, gt_bboxes_xyxyc = self.random_translate(img, bboxes_xxyyc_2_xyxyc(gt_bboxes_xxyyc))
        gt_bboxes_xxyyc = bboxes_xyxyc_2_xxyyc(gt_bboxes_xyxyc)

        ########################################################################
        # normalize the img (ImageNet mean/std), then HWC -> CHW:
        ########################################################################
        img = img/255.0
        img = img - np.array([0.485, 0.456, 0.406])
        img = img/np.array([0.229, 0.224, 0.225]) # (shape: (img_height, img_width, 3))
        img = np.transpose(img, (2, 0, 1)) # (shape: (3, img_height, img_width))
        img = img.astype(np.float32)

        ########################################################################
        # encode ground truth:
        ########################################################################
        gt_bboxes_xxyy = torch.from_numpy(gt_bboxes_xxyyc[:, 0:4]) # (shape: (num_gt_objects, 4))
        gt_classes = torch.from_numpy(gt_bboxes_xxyyc[:, 4]) # (shape: (num_gt_objects, ))
        if gt_bboxes_xxyy.size(0) == 0: # (if augmentation removed all gt objects)
            # BUGFIX: wrap around with modulo -- the previous "index + 1"
            # raised IndexError when the last example had no gt objects left:
            return self.__getitem__((index + 1) % self.num_examples)
        if gt_bboxes_xxyy.size() == torch.Size([4]): # (defensive: single flattened gt object)
            gt_bboxes_xxyy = gt_bboxes_xxyy.unsqueeze(0)
            gt_classes = torch.from_numpy(np.array([gt_classes.data]))
        label_regr, label_class = self.bbox_encoder.encode(gt_bboxes_xxyy, gt_classes)
        # (label_regr has shape: (num_anchors, 4)) (x_resid, y_resid, w_resid, h_resid)
        # (label_class has shape: (num_anchors, ))

        img = torch.from_numpy(img) # (shape: (3, img_height, img_width))
        return (img, label_regr, label_class)

    def __len__(self):
        # Total number of examples collected in __init__.
        return self.num_examples
# test = DatasetKITTINuscenesAugmentation("/home/fregu856/project1/data/nuscenes", "/home/fregu856/exjobb/data/kitti", "/home/fregu856/exjobb/data/kitti/meta", "train")
# for i in range(200):
# _ = test.__getitem__(i)
# # _ = test.__getitem__(0)
| 45.292967
| 254
| 0.546964
| 14,165
| 108,839
| 3.910272
| 0.035581
| 0.056473
| 0.040369
| 0.016429
| 0.830418
| 0.802759
| 0.781527
| 0.772139
| 0.7532
| 0.746159
| 0
| 0.034826
| 0.266577
| 108,839
| 2,402
| 255
| 45.311823
| 0.659054
| 0.313922
| 0
| 0.69821
| 0
| 0
| 0.041956
| 0.014105
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048593
| false
| 0
| 0.01364
| 0.011935
| 0.12191
| 0.000853
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1a272873885d1d913cc024632d21ca4bfc5b3cff
| 220
|
py
|
Python
|
desperado/cmd/credentials.py
|
ClifHouck/desperado
|
95de3ceeb088a9c0ee5690e8d08f0fee2a6f5bdf
|
[
"BSD-3-Clause"
] | 3
|
2015-02-16T15:38:55.000Z
|
2015-04-07T21:47:41.000Z
|
desperado/cmd/credentials.py
|
ClifHouck/desperado
|
95de3ceeb088a9c0ee5690e8d08f0fee2a6f5bdf
|
[
"BSD-3-Clause"
] | 5
|
2021-03-18T20:12:51.000Z
|
2022-03-11T23:12:34.000Z
|
desperado/cmd/credentials.py
|
ClifHouck/desperado
|
95de3ceeb088a9c0ee5690e8d08f0fee2a6f5bdf
|
[
"BSD-3-Clause"
] | 1
|
2015-11-18T00:55:51.000Z
|
2015-11-18T00:55:51.000Z
|
steam_username = ''
steam_password = ''
mail_user = ''
mail_password = ''
mail_server = ''
def steam():
return (steam_username, steam_password)
def mail():
return (mail_server, mail_user, mail_password)
| 16.923077
| 50
| 0.677273
| 26
| 220
| 5.346154
| 0.307692
| 0.18705
| 0.258993
| 0.374101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195455
| 220
| 12
| 51
| 18.333333
| 0.785311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.444444
| 0
| 0.222222
| 0.444444
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 6
|
c5047bb7d5da6f287eaaff55cd5ced11f48be8db
| 38
|
py
|
Python
|
tests/test_attention.py
|
gchhablani/vformer
|
c7dc7d14e33aa5b2974667d281e7910e17538b34
|
[
"MIT"
] | null | null | null |
tests/test_attention.py
|
gchhablani/vformer
|
c7dc7d14e33aa5b2974667d281e7910e17538b34
|
[
"MIT"
] | null | null | null |
tests/test_attention.py
|
gchhablani/vformer
|
c7dc7d14e33aa5b2974667d281e7910e17538b34
|
[
"MIT"
] | null | null | null |
import vformer.attention as attention
| 19
| 37
| 0.868421
| 5
| 38
| 6.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c509924cbf2c2db9e82833bf6cdddc8dd8141764
| 85
|
py
|
Python
|
src/hub/dataload/sources/pubchem/__init__.py
|
ravila4/mychem.info
|
9b63b5f0957b5e7b252ca8122734a363905036b3
|
[
"Apache-2.0"
] | 10
|
2017-07-24T11:45:27.000Z
|
2022-02-14T13:42:36.000Z
|
src/hub/dataload/sources/pubchem/__init__.py
|
veleritas/mychem.info
|
bb22357d4cbbc3c4865da224bf998f2cbc59f8f2
|
[
"Apache-2.0"
] | 92
|
2017-06-22T16:49:20.000Z
|
2022-03-24T20:50:01.000Z
|
src/hub/dataload/sources/pubchem/__init__.py
|
veleritas/mychem.info
|
bb22357d4cbbc3c4865da224bf998f2cbc59f8f2
|
[
"Apache-2.0"
] | 11
|
2017-06-12T18:31:35.000Z
|
2022-01-31T02:56:52.000Z
|
from .pubchem_upload import PubChemUploader
from .pubchem_dump import PubChemDumper
| 21.25
| 43
| 0.870588
| 10
| 85
| 7.2
| 0.7
| 0.305556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105882
| 85
| 3
| 44
| 28.333333
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d9feba37eb660601ce774fa6cda0bfd665a0df8
| 108
|
py
|
Python
|
terrascript/oneandone/d.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/oneandone/d.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/oneandone/d.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/oneandone/d.py
import terrascript
class oneandone_instance_size(terrascript.Data):
pass
| 15.428571
| 48
| 0.805556
| 13
| 108
| 6.538462
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12037
| 108
| 6
| 49
| 18
| 0.894737
| 0.240741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3de23e1efb3bc1d25d9d4bf4c50c2471f14e169b
| 126
|
py
|
Python
|
pdip/integrator/connection/types/queue/base/__init__.py
|
ahmetcagriakca/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | 2
|
2021-12-09T21:07:46.000Z
|
2021-12-11T22:18:01.000Z
|
pdip/connection/queue/base/__init__.py
|
fmuyilmaz/pdip
|
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
|
[
"MIT"
] | null | null | null |
pdip/connection/queue/base/__init__.py
|
fmuyilmaz/pdip
|
f7e30b0c04d9e85ef46b0b7094fafd3ce18bccab
|
[
"MIT"
] | 3
|
2021-11-15T00:47:00.000Z
|
2021-12-17T11:35:45.000Z
|
from .queue_connector import QueueConnector
from .queue_context import QueueContext
from .queue_provider import QueueProvider
| 31.5
| 43
| 0.880952
| 15
| 126
| 7.2
| 0.6
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 126
| 3
| 44
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3df3e4f0936754bf8efcd491f54a585ff4608191
| 145
|
py
|
Python
|
machina/noise/__init__.py
|
Wataru-Y/machina
|
d82db3b5535afa958b32ecfb13db19740c95be5c
|
[
"MIT"
] | null | null | null |
machina/noise/__init__.py
|
Wataru-Y/machina
|
d82db3b5535afa958b32ecfb13db19740c95be5c
|
[
"MIT"
] | null | null | null |
machina/noise/__init__.py
|
Wataru-Y/machina
|
d82db3b5535afa958b32ecfb13db19740c95be5c
|
[
"MIT"
] | null | null | null |
from machina.noise.base import BaseActionNoise
from machina.noise.ounoise import OUActionNoise
from machina.noise.normalnoise import NormalNoise
| 36.25
| 49
| 0.875862
| 18
| 145
| 7.055556
| 0.5
| 0.259843
| 0.377953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082759
| 145
| 3
| 50
| 48.333333
| 0.954887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9ac268c1fd33d71128b7c79fc9c81050eeb9cedf
| 97
|
py
|
Python
|
design/__init__.py
|
insidemirage/py-wishlist
|
488e8bfab04a8a50aedddd797edd314827028d56
|
[
"MIT"
] | null | null | null |
design/__init__.py
|
insidemirage/py-wishlist
|
488e8bfab04a8a50aedddd797edd314827028d56
|
[
"MIT"
] | null | null | null |
design/__init__.py
|
insidemirage/py-wishlist
|
488e8bfab04a8a50aedddd797edd314827028d56
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .wish_dialog import Ui_WishDialog
from .window import Ui_MainWindow
| 24.25
| 38
| 0.752577
| 14
| 97
| 5
| 0.785714
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.134021
| 97
| 3
| 39
| 32.333333
| 0.821429
| 0.216495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9addd91f09d39d7f4caa57d01a084cb105e25ce1
| 47
|
py
|
Python
|
pigor/plugins/interferometer/__init__.py
|
nicoeinsidler/pigor2
|
bf3e5b542cdd1c685b2542258c728861e8c586c8
|
[
"MIT"
] | 1
|
2019-10-09T11:31:41.000Z
|
2019-10-09T11:31:41.000Z
|
pigor/plugins/interferometer/__init__.py
|
nicoeinsidler/pigor2
|
bf3e5b542cdd1c685b2542258c728861e8c586c8
|
[
"MIT"
] | 1
|
2019-11-07T11:59:33.000Z
|
2019-11-09T23:31:31.000Z
|
pigor/plugins/polarimeter/__init__.py
|
nicoeinsidler/pigor2
|
bf3e5b542cdd1c685b2542258c728861e8c586c8
|
[
"MIT"
] | null | null | null |
from .functions import *
from .adapter import *
| 23.5
| 24
| 0.765957
| 6
| 47
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 25
| 23.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b1061b018e83ca97f44443eb21660e31532d4c74
| 101
|
py
|
Python
|
src/scripts/__init__.py
|
kjhall-iri/cpt-tools
|
7c0a43c3332e6c51253fe4a530c47a2b839d6075
|
[
"MIT"
] | null | null | null |
src/scripts/__init__.py
|
kjhall-iri/cpt-tools
|
7c0a43c3332e6c51253fe4a530c47a2b839d6075
|
[
"MIT"
] | null | null | null |
src/scripts/__init__.py
|
kjhall-iri/cpt-tools
|
7c0a43c3332e6c51253fe4a530c47a2b839d6075
|
[
"MIT"
] | null | null | null |
from .seasonal import load_nmme, load_c3s, load_observations, preload_southasia, preload_lesotho_nmme
| 101
| 101
| 0.881188
| 14
| 101
| 5.928571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.069307
| 101
| 1
| 101
| 101
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b112080f51393c0730a83c7b09f8627af2bb9886
| 72
|
py
|
Python
|
blockworld/blocks/__init__.py
|
belledon/blockworld
|
be84bf304e688cdf4418c3990399722f181ad566
|
[
"MIT"
] | null | null | null |
blockworld/blocks/__init__.py
|
belledon/blockworld
|
be84bf304e688cdf4418c3990399722f181ad566
|
[
"MIT"
] | null | null | null |
blockworld/blocks/__init__.py
|
belledon/blockworld
|
be84bf304e688cdf4418c3990399722f181ad566
|
[
"MIT"
] | null | null | null |
from .simple_block import SimpleBlock
from .base_block import BaseBlock
| 24
| 37
| 0.861111
| 10
| 72
| 6
| 0.7
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 38
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b11c4b5a6e8bc5512d23a84f6c35221ca7be3e90
| 18
|
py
|
Python
|
src/rough.py
|
parthbhope/NC_Concolic_Testing
|
d2622ba3f7fd667b6534bda09d29f1c95c59799f
|
[
"BSD-3-Clause"
] | null | null | null |
src/rough.py
|
parthbhope/NC_Concolic_Testing
|
d2622ba3f7fd667b6534bda09d29f1c95c59799f
|
[
"BSD-3-Clause"
] | null | null | null |
src/rough.py
|
parthbhope/NC_Concolic_Testing
|
d2622ba3f7fd667b6534bda09d29f1c95c59799f
|
[
"BSD-3-Clause"
] | null | null | null |
print(len((122,)))
| 18
| 18
| 0.611111
| 3
| 18
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 18
| 1
| 18
| 18
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b13282982de4e122bbd071e489df33bf4069cdd4
| 30
|
py
|
Python
|
domtree/__init__.py
|
tvogels/domtree
|
fb89724db10b37b76954fa0f1977c00945d32c05
|
[
"Apache-2.0"
] | null | null | null |
domtree/__init__.py
|
tvogels/domtree
|
fb89724db10b37b76954fa0f1977c00945d32c05
|
[
"Apache-2.0"
] | null | null | null |
domtree/__init__.py
|
tvogels/domtree
|
fb89724db10b37b76954fa0f1977c00945d32c05
|
[
"Apache-2.0"
] | null | null | null |
from domtree.node import Node
| 15
| 29
| 0.833333
| 5
| 30
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b1785bf3e683b7d2fe81ddfd7756f069c270c418
| 37
|
py
|
Python
|
tests/src/TNB/mklaren/projection/__init__.py
|
bellwethers-in-se/issueCloseTime
|
e5e00c9625da0793dc8e7985fd88b0ca0b35f7d3
|
[
"MIT"
] | 9
|
2017-07-27T10:32:48.000Z
|
2021-07-01T11:51:51.000Z
|
tests/src/TNB/mklaren/projection/__init__.py
|
bellwethers-in-se/issueCloseTime
|
e5e00c9625da0793dc8e7985fd88b0ca0b35f7d3
|
[
"MIT"
] | 11
|
2016-03-15T16:27:47.000Z
|
2019-09-05T02:25:08.000Z
|
tests/src/TNB/mklaren/projection/__init__.py
|
bellwethers-in-se/issueCloseTime
|
e5e00c9625da0793dc8e7985fd88b0ca0b35f7d3
|
[
"MIT"
] | 5
|
2017-01-28T22:45:34.000Z
|
2019-12-04T13:15:10.000Z
|
import csi
import icd
import nystrom
| 9.25
| 14
| 0.837838
| 6
| 37
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 3
| 15
| 12.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b18ad303ee34356030f7f24be4c300ddc92f6429
| 54
|
py
|
Python
|
dadukate/blueprints/contact/__init__.py
|
DevDHera/Dadu-Kate
|
ab1e0780d9b4ea6308b72abbd8e0b131c0097e31
|
[
"MIT"
] | null | null | null |
dadukate/blueprints/contact/__init__.py
|
DevDHera/Dadu-Kate
|
ab1e0780d9b4ea6308b72abbd8e0b131c0097e31
|
[
"MIT"
] | null | null | null |
dadukate/blueprints/contact/__init__.py
|
DevDHera/Dadu-Kate
|
ab1e0780d9b4ea6308b72abbd8e0b131c0097e31
|
[
"MIT"
] | null | null | null |
from dadukate.blueprints.contact.views import contact
| 27
| 53
| 0.87037
| 7
| 54
| 6.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 54
| 1
| 54
| 54
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
b18e111edad06245b8b72384833b677d789104df
| 104
|
py
|
Python
|
test_project/views.py
|
rhblind/django-tracer
|
b0dfb9df27f5cbafe14ff964307da3cb3d006d2b
|
[
"BSD-3-Clause"
] | 9
|
2018-07-31T16:29:48.000Z
|
2019-07-29T19:34:41.000Z
|
test_project/views.py
|
revsys/django-tracer
|
b0dfb9df27f5cbafe14ff964307da3cb3d006d2b
|
[
"BSD-3-Clause"
] | 1
|
2018-06-20T13:37:59.000Z
|
2018-06-21T06:07:33.000Z
|
test_project/views.py
|
rhblind/django-tracer
|
b0dfb9df27f5cbafe14ff964307da3cb3d006d2b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-24T10:12:01.000Z
|
2020-11-24T10:12:01.000Z
|
from django.http import HttpResponse
def request_test_view(request):
return HttpResponse("VIEW")
| 14.857143
| 36
| 0.778846
| 13
| 104
| 6.076923
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144231
| 104
| 6
| 37
| 17.333333
| 0.88764
| 0
| 0
| 0
| 0
| 0
| 0.038835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
49248601b390516abf06d50d8d9f51e489c8c589
| 109
|
py
|
Python
|
tracker/sot/lib/version.py
|
collector-m/UniTrack
|
e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb
|
[
"MIT"
] | 240
|
2021-06-20T13:50:42.000Z
|
2022-03-31T05:08:29.000Z
|
tracker/sot/lib/version.py
|
collector-m/UniTrack
|
e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb
|
[
"MIT"
] | 27
|
2021-07-12T01:19:39.000Z
|
2021-12-27T08:05:08.000Z
|
tracker/sot/lib/version.py
|
collector-m/UniTrack
|
e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb
|
[
"MIT"
] | 24
|
2021-07-01T09:48:24.000Z
|
2022-03-14T06:39:46.000Z
|
# GENERATED VERSION FILE
# TIME: Fri Dec 11 13:54:02 2020
__version__ = '1.0.rc0'
short_version = '1.0.rc0'
| 18.166667
| 32
| 0.697248
| 20
| 109
| 3.55
| 0.75
| 0.225352
| 0.253521
| 0.338028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197802
| 0.165138
| 109
| 5
| 33
| 21.8
| 0.582418
| 0.486239
| 0
| 0
| 1
| 0
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
493f4198a2ca86429a93db674807051c2ab97dd8
| 28
|
py
|
Python
|
malclient/__init__.py
|
RobiMez/MAL-API-Client
|
b58d1bc662495fad02e8212870adb4e6fe30201c
|
[
"MIT"
] | 14
|
2020-07-19T04:19:10.000Z
|
2022-02-07T10:16:46.000Z
|
MK/Sync/__init__.py
|
Mecha-Karen/MechaK.py
|
e173d53d2412101a5c96e624ce784333ba8e784f
|
[
"MIT"
] | 1
|
2020-08-01T16:38:48.000Z
|
2020-09-12T20:16:08.000Z
|
MK/Sync/__init__.py
|
Mecha-Karen/MechaK.py
|
e173d53d2412101a5c96e624ce784333ba8e784f
|
[
"MIT"
] | 9
|
2020-07-19T04:04:12.000Z
|
2022-02-07T11:25:19.000Z
|
from .client import Client
| 14
| 27
| 0.785714
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 1
| 28
| 28
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
771730d2a6577c85ee1ae9e0a620f4347b2df0f9
| 2,541
|
py
|
Python
|
tools/telemetry/telemetry/page/actions/javascript_click_unittest.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-01-16T03:57:28.000Z
|
2021-01-23T15:29:45.000Z
|
tools/telemetry/telemetry/page/actions/javascript_click_unittest.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/telemetry/telemetry/page/actions/javascript_click_unittest.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-04-17T13:19:09.000Z
|
2021-10-21T12:55:15.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page.actions import javascript_click
from telemetry.page.actions import wait_until
from telemetry.unittest import tab_test_case
class ClickElementActionTest(tab_test_case.TabTestCase):
def testClickWithSelectorWaitForNavigation(self):
self.Navigate('page_with_link.html')
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'selector': 'a[id="clickme"]'}
i = javascript_click.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait_until.WaitUntil(i, data)
j.RunActionAndWait(None, self._tab)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithSingleQuoteSelectorWaitForNavigation(self):
self.Navigate('page_with_link.html')
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'selector': 'a[id=\'clickme\']'}
i = javascript_click.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait_until.WaitUntil(i, data)
j.RunActionAndWait(None, self._tab)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithTextWaitForRefChange(self):
self.Navigate('page_with_link.html')
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'text': 'Click me'}
i = javascript_click.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait_until.WaitUntil(i, data)
j.RunActionAndWait(None, self._tab)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithXPathWaitForRefChange(self):
self.Navigate('page_with_link.html')
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'xpath': '//a[@id="clickme"]'}
i = javascript_click.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait_until.WaitUntil(i, data)
j.RunActionAndWait(None, self._tab)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
| 34.337838
| 72
| 0.702873
| 278
| 2,541
| 6.258993
| 0.266187
| 0.048276
| 0.055172
| 0.073563
| 0.764943
| 0.73046
| 0.73046
| 0.73046
| 0.73046
| 0.73046
| 0
| 0.001892
| 0.168044
| 2,541
| 73
| 73
| 34.808219
| 0.821192
| 0.061
| 0
| 0.785714
| 0
| 0
| 0.238875
| 0.09068
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.071429
| false
| 0
| 0.053571
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
771f82fa40237706c15ce1439664ed4c00418724
| 326
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/import10.py
|
lipovsek/pytea
|
c536515a5e5947fac8871784323ba7eddc58956d
|
[
"MIT"
] | null | null | null |
packages/pyright-internal/src/tests/samples/import10.py
|
lipovsek/pytea
|
c536515a5e5947fac8871784323ba7eddc58956d
|
[
"MIT"
] | null | null | null |
packages/pyright-internal/src/tests/samples/import10.py
|
lipovsek/pytea
|
c536515a5e5947fac8871784323ba7eddc58956d
|
[
"MIT"
] | null | null | null |
# This sample tests the handling of an unresolved import.
# It should report a single error but not have cascading
# errors when the unresolved symbol is used.
# This should generate an error.
import unresolved_import
def test_zero_division():
with unresolved_import.raises(ZeroDivisionError):
1 / 0
| 27.166667
| 58
| 0.739264
| 45
| 326
| 5.266667
| 0.755556
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.214724
| 326
| 11
| 59
| 29.636364
| 0.917969
| 0.564417
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91f7fb0764029b89d8fe9ebb676bd93c6952e149
| 175
|
py
|
Python
|
what2make/search/admin.py
|
velezj425/what-to-make-site
|
f78f0c01b60408b0aefd64f9346b9d72e7dbb3f0
|
[
"MIT"
] | null | null | null |
what2make/search/admin.py
|
velezj425/what-to-make-site
|
f78f0c01b60408b0aefd64f9346b9d72e7dbb3f0
|
[
"MIT"
] | null | null | null |
what2make/search/admin.py
|
velezj425/what-to-make-site
|
f78f0c01b60408b0aefd64f9346b9d72e7dbb3f0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Profile)
admin.site.register(Recipe)
admin.site.register(Ing_Type)
admin.site.register(Ingredient)
| 21.875
| 32
| 0.817143
| 25
| 175
| 5.68
| 0.52
| 0.253521
| 0.478873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074286
| 175
| 8
| 33
| 21.875
| 0.876543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6201c0486ae09827bb6054b82958c937dde88f39
| 3,432
|
py
|
Python
|
tests/test_tokenisers.py
|
LaudateCorpus1/Bella-5
|
7de51ff4914bdefbcf05e490b85517c5fb014595
|
[
"MIT"
] | 22
|
2018-06-16T02:03:44.000Z
|
2022-01-04T19:06:12.000Z
|
tests/test_tokenisers.py
|
LaudateCorpus1/Bella-5
|
7de51ff4914bdefbcf05e490b85517c5fb014595
|
[
"MIT"
] | 3
|
2018-06-21T11:01:28.000Z
|
2018-11-29T20:32:22.000Z
|
tests/test_tokenisers.py
|
LaudateCorpus1/Bella-5
|
7de51ff4914bdefbcf05e490b85517c5fb014595
|
[
"MIT"
] | 2
|
2019-11-12T18:02:15.000Z
|
2021-11-25T12:15:02.000Z
|
'''
Unit test suite for the :py:mod:`bella.tokenisers` module.
'''
from unittest import TestCase
from bella.tokenisers import whitespace
from bella.tokenisers import ark_twokenize, spacy_tokeniser
class TestTokenisers(TestCase):
'''
Contains the following functions:
1. :py:func:`bella.tokenisers.whitespace`
'''
test_sentences = ['The fox jumped over the MOON.',
'lol ly x0x0,:D',
' ']
def test_whitespace(self):
'''
Tests :py:func:`bella.tokenisers.whitespace`
'''
with self.assertRaises(ValueError, msg='It should not accept a list'):
whitespace(['words to be tested'])
expected_results = [['The', 'fox', 'jumped', 'over', 'the', 'MOON.'],
['lol', 'ly', 'x0x0,:D'],
[]]
for index, test_sentence in enumerate(self.test_sentences):
test_result = whitespace(test_sentence)
expected_result = expected_results[index]
self.assertIsInstance(test_result, list, msg='The returned result is of '\
'the wrong type {} should be a list'.format(type(test_result)))
self.assertEqual(expected_result, test_result, msg='Did not return the '\
'expected result {} returned this {}'\
.format(expected_result, test_result))
def test_ark_twokenize(self):
'''
Tests :py:func:`bella.tokenisers.ark_twokenize`
'''
with self.assertRaises(ValueError, msg='It should not accept a list'):
ark_twokenize(['words to be tested'])
expected_results = [['The', 'fox', 'jumped', 'over', 'the', 'MOON', '.'],
['lol', 'ly', 'x0x0', ',', ':D'],
[]]
for index, test_sentence in enumerate(self.test_sentences):
test_result = ark_twokenize(test_sentence)
expected_result = expected_results[index]
self.assertIsInstance(test_result, list, msg='The returned result is of '\
'the wrong type {} should be a list'.format(type(test_result)))
self.assertEqual(expected_result, test_result, msg='Did not return the '\
'expected result {} returned this {}'\
.format(expected_result, test_result))
def test_spacy_tokeniser(self):
'''
Tests :py:func:`bella.tokenisers.spacy_tokeniser`
'''
with self.assertRaises(ValueError, msg='It should not accept a list'):
spacy_tokeniser(['words to be tested'])
expected_results =[['The', 'fox', 'jumped', 'over', 'the', 'MOON', '.'],
['lol', 'ly', 'x0x0,:D'], []]
for index, test_sentence in enumerate(self.test_sentences):
test_result = spacy_tokeniser(test_sentence)
expected_result = expected_results[index]
self.assertIsInstance(test_result, list, msg='The returned result is of '\
'the wrong type {} should be a list'.format(type(test_result)))
self.assertEqual(expected_result, test_result, msg='Did not return the '\
'expected result {} returned this {}'\
.format(expected_result, test_result))
| 45.157895
| 97
| 0.557401
| 357
| 3,432
| 5.210084
| 0.196078
| 0.080645
| 0.058065
| 0.077419
| 0.802151
| 0.780108
| 0.73172
| 0.73172
| 0.73172
| 0.73172
| 0
| 0.003868
| 0.32197
| 3,432
| 76
| 98
| 45.157895
| 0.795445
| 0.080711
| 0
| 0.583333
| 0
| 0
| 0.210131
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
620bf1d173fff661326c2601e4eac1034d7e87e3
| 135,726
|
py
|
Python
|
mbuild/tests/test_charmm_writer.py
|
daico007/mbuild
|
8baa19a46d1ece809f214a2e1a2f8984dd923b56
|
[
"MIT"
] | 101
|
2017-02-14T18:23:52.000Z
|
2022-03-20T03:29:59.000Z
|
mbuild/tests/test_charmm_writer.py
|
Leticia-maria/mbuild
|
b6278441ff6d6cf1f954affe3d0fbeec17bbbc6d
|
[
"MIT"
] | 689
|
2017-02-13T04:40:30.000Z
|
2022-03-31T19:57:32.000Z
|
mbuild/tests/test_charmm_writer.py
|
Leticia-maria/mbuild
|
b6278441ff6d6cf1f954affe3d0fbeec17bbbc6d
|
[
"MIT"
] | 82
|
2017-02-13T21:08:48.000Z
|
2022-03-21T21:55:43.000Z
|
from collections import OrderedDict
import numpy as np
import pytest
from foyer.forcefields import forcefields
import mbuild as mb
from mbuild import Box, Compound
from mbuild.formats import charmm_writer
from mbuild.formats.charmm_writer import Charmm
from mbuild.lattice import load_cif
from mbuild.tests.base_test import BaseTest
from mbuild.utils.conversion import (
base10_to_base16_alph_num,
base10_to_base26_alph,
base10_to_base52_alph,
base10_to_base62_alph_num,
)
from mbuild.utils.io import get_fn, has_foyer
from mbuild.utils.specific_ff_to_residue import specific_ff_to_residue
@pytest.mark.skipif(not has_foyer, reason="Foyer package not installed")
class TestCharmmWriterData(BaseTest):
def test_save(self, ethane_gomc):
Charmm(
ethane_gomc,
"ethane",
ff_filename="ethane",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
)
def test_save_charmm_gomc_ff(self, ethane_gomc):
charmm = Charmm(
ethane_gomc,
"charmm_data",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
)
charmm.write_inp()
with open("charmm_data.inp", "r") as fp:
masses_read = False
bonds_read = False
angles_read = False
dihedrals_read = False
nonbondeds_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "mass" in line
and "atomTypeForceFieldName_ResidueName" in line
and "(i.e., atoms_type_per_utilized_FF)" in line
):
masses_read = True
assert len(out_gomc[i + 1].split("!")[0].split()) == 3
assert out_gomc[i + 1].split("!")[0].split()[0:3] == [
"*",
"A",
"12.010780",
]
assert len(out_gomc[i + 2].split("!")[0].split()) == 3
assert out_gomc[i + 2].split("!")[0].split()[0:3] == [
"*",
"B",
"1.007947",
]
assert out_gomc[i + 1].split()[4:5] == ["opls_135_ETH"]
assert out_gomc[i + 2].split()[4:5] == ["opls_140_ETH"]
elif (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["A", "B", "340.0", "1.09"],
["A", "A", "268.0", "1.529"],
]
assert len(out_gomc[i + 1].split("!")[0].split()) == 4
assert len(out_gomc[i + 2].split("!")[0].split()) == 4
if (
out_gomc[i + 1].split("!")[0].split()[0:4]
== bond_types[0]
):
assert (
out_gomc[i + 1].split("!")[0].split()[0:4]
== bond_types[0]
)
assert (
out_gomc[i + 2].split("!")[0].split()[0:4]
== bond_types[1]
)
elif (
out_gomc[i + 1].split("!")[0].split()[0:4]
== bond_types[1]
):
assert (
out_gomc[i + 1].split("!")[0].split()[0:4]
== bond_types[1]
)
assert (
out_gomc[i + 2].split("!")[0].split()[0:4]
== bond_types[0]
)
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line
):
angles_read = True
angle_types = [
["A", "A", "B", "37.5", "110.70000"],
["B", "A", "B", "33.0", "107.80000"],
]
assert len(out_gomc[i + 1].split("!")[0].split()) == 5
assert len(out_gomc[i + 2].split("!")[0].split()) == 5
if (
out_gomc[i + 1].split("!")[0].split()[0:5]
== angle_types[0]
):
assert (
out_gomc[i + 1].split("!")[0].split()[0:5]
== angle_types[0]
)
assert (
out_gomc[i + 2].split("!")[0].split()[0:5]
== angle_types[1]
)
elif (
out_gomc[i + 1].split("!")[0].split()[0:4]
== angle_types[1]
):
assert (
out_gomc[i + 1].split("!")[0].split()[0:5]
== angle_types[1]
)
assert (
out_gomc[i + 2].split("!")[0].split()[0:5]
== angle_types[0]
)
elif (
"!atom_types" in line
and "Kchi" in line
and "n" in line
and "delta" in line
and "atoms_types_per_utilized_FF" in line
):
dihedrals_read = True
dihed_types = [
["B", "A", "A", "B", "0.300000", "0", "90.0"],
["B", "A", "A", "B", "0.000000", "1", "180.0"],
["B", "A", "A", "B", "0.000000", "2", "0.0"],
["B", "A", "A", "B", "-0.150000", "3", "180.0"],
["B", "A", "A", "B", "0.000000", "4", "0.0"],
["B", "A", "A", "B", "0.000000", "5", "180.0"],
]
for j in range(0, len(dihed_types)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 7
)
assert (
out_gomc[i + 1 + j].split("!")[0].split()[0:7]
== dihed_types[j]
)
elif (
"!atype" in line
and "ignored epsilon" in line
and "Rmin/2" in line
and "ignored" in line
and "eps,1-4" in line
and "Rmin/2,1-4" in line
and "atom_type_per_utilized_FF" in line
):
nonbondeds_read = True
nb_types = [
[
"A",
"0.00",
"-0.066000000",
"1.96430858454",
"0.00",
"-0.033000000",
"1.96430858454",
],
[
"B",
"0.00",
"-0.030000000",
"1.40307756039",
"0.00",
"-0.015000000",
"1.40307756039",
],
]
for j in range(0, len(nb_types)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 7
)
assert (
out_gomc[i + 1 + j].split("!")[0].split()[0:7]
== nb_types[j]
)
else:
pass
assert masses_read
assert bonds_read
assert angles_read
assert dihedrals_read
assert nonbondeds_read
def test_save_charmm_psf(self, ethane_gomc):
charmm = Charmm(
ethane_gomc,
"charmm_data",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
)
charmm.write_psf()
with open("charmm_data.psf", "r") as fp:
charges_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "8 !NATOM" in line:
charges_read = True
atom_type_charge_etc_list = [
[
"1",
"SYS",
"1",
"ETH",
"C1",
"A",
"-0.180000",
"12.0108",
],
[
"2",
"SYS",
"1",
"ETH",
"C2",
"A",
"-0.180000",
"12.0108",
],
[
"3",
"SYS",
"1",
"ETH",
"H1",
"B",
"0.060000",
"1.0079",
],
[
"4",
"SYS",
"1",
"ETH",
"H2",
"B",
"0.060000",
"1.0079",
],
[
"5",
"SYS",
"1",
"ETH",
"H3",
"B",
"0.060000",
"1.0079",
],
[
"6",
"SYS",
"1",
"ETH",
"H4",
"B",
"0.060000",
"1.0079",
],
[
"7",
"SYS",
"1",
"ETH",
"H5",
"B",
"0.060000",
"1.0079",
],
[
"8",
"SYS",
"1",
"ETH",
"H6",
"B",
"0.060000",
"1.0079",
],
]
for j in range(0, len(atom_type_charge_etc_list)):
assert (
out_gomc[i + 1 + j].split()[0:8]
== atom_type_charge_etc_list[j]
)
else:
pass
assert charges_read
def test_save_charmm_pdb(self, ethane_gomc):
charmm = Charmm(
ethane_gomc,
"charmm_data",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
)
charmm.write_pdb()
with open("charmm_data.pdb", "r") as fp:
pdb_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_read = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "ETH", "A", "1"],
["ATOM", "2", "C2", "ETH", "A", "1"],
["ATOM", "3", "H1", "ETH", "A", "1"],
["ATOM", "4", "H2", "ETH", "A", "1"],
["ATOM", "5", "H3", "ETH", "A", "1"],
["ATOM", "6", "H4", "ETH", "A", "1"],
["ATOM", "7", "H5", "ETH", "A", "1"],
["ATOM", "8", "H6", "ETH", "A", "1"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "C"],
["1.00", "0.00", "C"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert pdb_read
def test_save_charmm_ua_gomc_ff(self, two_propanol_ua):
charmm = Charmm(
two_propanol_ua,
"charmm_data_UA",
ff_filename="charmm_data_UA",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_inp()
with open("charmm_data_UA.inp", "r") as fp:
masses_read = False
bonds_read = False
angles_read = False
dihedrals_read = False
nonbondeds_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "mass" in line
and "atomTypeForceFieldName_ResidueName" in line
and "(i.e., atoms_type_per_utilized_FF)" in line
):
masses_read = True
atom_types_1 = [
["*", "A", "15.035000"],
["*", "B", "13.019000"],
["*", "D", "15.999430"],
["*", "C", "1.007947"],
]
atom_types_2 = [
["CH3_sp3_POL"],
["CH_O_POL"],
["O_POL"],
["H_POL"],
]
for j in range(0, len(atom_types_1)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 3
)
assert (
out_gomc[i + 1 + j].split("!")[0].split()[0:3]
== atom_types_1[j]
)
assert (
out_gomc[i + 1 + j].split()[4:5] == atom_types_2[j]
)
elif (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["C", "D", "600.40152964", "0.945"],
["B", "D", "600.40152964", "1.43"],
["A", "B", "600.40152964", "1.54"],
]
total_bonds_evaluated = []
total_bonds_evaluated_reorg = []
for j in range(0, len(bond_types)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 4
)
if (
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
== bond_types[0]
or bond_types[1]
or bond_types[2]
):
total_bonds_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
for k in range(0, len(bond_types)):
if bond_types[k] in total_bonds_evaluated:
total_bonds_evaluated_reorg.append(bond_types[k])
assert total_bonds_evaluated_reorg == bond_types
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line in line
):
angles_read = True
angle_types = [
["A", "B", "A", "62.10013026", "112.00007"],
["A", "B", "D", "50.07754422", "109.46989"],
["B", "D", "C", "55.04555449", "108.49987"],
]
total_angles_evaluated = []
total_angles_evaluated_reorg = []
for j in range(0, len(angle_types)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 5
)
if (
out_gomc[i + 1 + j].split("!")[0].split()[0:5]
== angle_types[0]
or angle_types[1]
or angle_types[2]
):
total_angles_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:5]
)
for k in range(0, len(angle_types)):
if angle_types[k] in total_angles_evaluated:
total_angles_evaluated_reorg.append(angle_types[k])
assert total_angles_evaluated_reorg == angle_types
elif (
"!atom_types" in line
and "Kchi" in line
and "n" in line
and "delta" in line
and "atoms_types_per_utilized_FF" in line
):
dihedrals_read = True
dihedral_types = [
["A", "B", "D", "C", "0.647232", "0", "90.0"],
["A", "B", "D", "C", "-0.392135", "1", "180.0"],
["A", "B", "D", "C", "-0.062518", "2", "0.0"],
["A", "B", "D", "C", "0.345615", "3", "180.0"],
["A", "B", "D", "C", "0.000000", "4", "0.0"],
["A", "B", "D", "C", "0.000000", "5", "180.0"],
]
for j in range(0, len(dihedral_types)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 7
)
assert (
out_gomc[i + 1 + j].split("!")[0].split()[0:7]
== dihedral_types[j]
)
elif (
"!atype" in line
and "ignored epsilon" in line
and "Rmin/2" in line
and "ignored" in line
and "eps,1-4" in line
and "Rmin/2,1-4" in line
and "atom_type_per_utilized_FF" in line
):
nonbondeds_read = True
nb_types = [
[
"A",
"0.00",
"-0.194745937",
"2.10461634058",
"0.00",
"-0.000000000",
"2.10461634058",
],
[
"B",
"0.00",
"-0.019872012",
"2.43013033459",
"0.00",
"-0.000000000",
"2.43013033459",
],
[
"D",
"0.00",
"-0.184809990",
"1.69491769295",
"0.00",
"-0.000000000",
"1.69491769295",
],
[
"C",
"0.00",
"-0.000000000",
"5.61231024155",
"0.00",
"-0.000000000",
"5.61231024155",
],
]
for j in range(0, len(nb_types)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 7
)
assert (
out_gomc[i + 1 + j].split("!")[0].split()[0:7]
== nb_types[j]
)
else:
pass
assert masses_read
assert bonds_read
assert angles_read
assert dihedrals_read
assert nonbondeds_read
def test_save_charmm_ua_psf(self, two_propanol_ua):
charmm = Charmm(
two_propanol_ua,
"charmm_data_UA",
ff_filename="charmm_data_UA",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_psf()
with open("charmm_data_UA.psf", "r") as fp:
read_psf = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "5 !NATOM" in line:
read_psf = True
atom_type_charge_etc_list = [
[
"1",
"SYS",
"1",
"POL",
"C1",
"A",
"0.000000",
"15.0350",
],
[
"2",
"SYS",
"1",
"POL",
"BD1",
"B",
"0.265000",
"13.0190",
],
[
"3",
"SYS",
"1",
"POL",
"O1",
"D",
"-0.700000",
"15.9994",
],
[
"4",
"SYS",
"1",
"POL",
"H1",
"C",
"0.435000",
"1.0079",
],
[
"5",
"SYS",
"1",
"POL",
"C2",
"A",
"0.000000",
"15.0350",
],
]
for j in range(0, len(atom_type_charge_etc_list)):
assert (
out_gomc[i + 1 + j].split()[0:8]
== atom_type_charge_etc_list[j]
)
else:
pass
assert read_psf
def test_save_charmm_ua_pdb(self, two_propanol_ua):
charmm = Charmm(
two_propanol_ua,
"charmm_data_UA",
ff_filename="charmm_data_UA",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_pdb()
with open("charmm_data_UA.pdb", "r") as fp:
read_pdb = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
read_pdb = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "POL", "A", "1"],
["ATOM", "2", "BD1", "POL", "A", "1"],
["ATOM", "3", "O1", "POL", "A", "1"],
["ATOM", "4", "H1", "POL", "A", "1"],
["ATOM", "5", "C2", "POL", "A", "1"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "EP"],
["1.00", "0.00", "EP"],
["1.00", "0.00", "O"],
["1.00", "0.00", "H"],
["1.00", "0.00", "EP"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert read_pdb
def test_charmm_pdb_fix_angle_bond_fix_atoms(
self, ethane_gomc, ethanol_gomc
):
test_box_ethane_propane = mb.fill_box(
compound=[ethane_gomc, ethanol_gomc],
n_compounds=[1, 1],
box=[2.0, 2.0, 2.0],
)
charmm = Charmm(
test_box_ethane_propane,
"Test_fixes_angle_bond_atoms",
ff_filename="Test_fixes_angle_bond_atoms",
residues=[ethanol_gomc.name, ethane_gomc.name],
forcefield_selection="oplsaa",
fix_residue=[ethane_gomc.name],
fix_residue_in_box=[ethanol_gomc.name],
gomc_fix_bonds_angles=[ethane_gomc.name],
)
charmm.write_inp()
charmm.write_pdb()
with open("Test_fixes_angle_bond_atoms.inp", "r") as fp:
masses_read = False
bonds_read = False
angles_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "mass" in line
and "atomTypeForceFieldName_ResidueName" in line
and "(i.e., atoms_type_per_utilized_FF)" in line
):
masses_read = True
mass_type_1 = [
["*", "A", "12.010780"],
["*", "C", "1.007947"],
["*", "B", "12.010780"],
["*", "G", "12.010780"],
["*", "E", "15.999430"],
["*", "D", "1.007947"],
["*", "F", "1.007947"],
]
mass_type_2 = [
["opls_135_ETH"],
["opls_140_ETH"],
["opls_135_ETO"],
["opls_157_ETO"],
["opls_154_ETO"],
["opls_140_ETO"],
["opls_155_ETO"],
]
for j in range(0, len(mass_type_1)):
assert (
len(out_gomc[i + 1 + j].split("!")[0].split()) == 3
)
assert (
out_gomc[i + 1 + j].split("!")[0].split()[0:3]
== mass_type_1[j]
)
assert (
out_gomc[i + 1 + j].split()[4:5] == mass_type_2[j]
)
elif (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["D", "G", "340.0", "1.09"],
["E", "G", "320.0", "1.41"],
["E", "F", "553.0", "0.945"],
["A", "C", "999999999999", "1.09"],
["B", "D", "340.0", "1.09"],
["A", "A", "999999999999", "1.529"],
["B", "G", "268.0", "1.529"],
]
total_bonds_evaluated = []
total_fixed_bonds = []
for j in range(0, 7):
total_bonds_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[2:3] == [
"999999999999"
]:
total_fixed_bonds.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert total_bonds_evaluated.sort() == bond_types.sort()
assert len(total_fixed_bonds) == 2
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line in line
):
angles_read = True
fixed_angle_types = [
["A", "A", "C", "999999999999", "110.70000"],
["C", "A", "C", "999999999999", "107.80000"],
]
total_angles_evaluated = []
total_fixed_angles = []
for j in range(0, 9):
if out_gomc[i + 1 + j].split("!")[0].split()[0:4] == (
fixed_angle_types[0] or fixed_angle_types[1]
):
total_angles_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[3:4] == [
"999999999999"
]:
total_fixed_angles.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert (
fixed_angle_types.sort()
== total_angles_evaluated.sort()
)
assert len(total_fixed_angles) == len(fixed_angle_types)
else:
pass
assert masses_read
assert bonds_read
assert angles_read
with open("Test_fixes_angle_bond_atoms.pdb", "r") as fp:
read_pdb_part_1 = False
read_pdb_part_2 = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
read_pdb_part_1 = True
assert out_gomc[i].split()[0:7] == [
"CRYST1",
"20.000",
"20.000",
"20.000",
"90.00",
"90.00",
"90.00",
]
if "CRYST1" in line:
read_pdb_part_2 = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "ETH", "A", "1"],
["ATOM", "2", "C2", "ETH", "A", "1"],
["ATOM", "3", "H1", "ETH", "A", "1"],
["ATOM", "4", "H2", "ETH", "A", "1"],
["ATOM", "5", "H3", "ETH", "A", "1"],
["ATOM", "6", "H4", "ETH", "A", "1"],
["ATOM", "7", "H5", "ETH", "A", "1"],
["ATOM", "8", "H6", "ETH", "A", "1"],
["ATOM", "9", "C1", "ETO", "A", "2"],
["ATOM", "10", "C2", "ETO", "A", "2"],
["ATOM", "11", "O1", "ETO", "A", "2"],
["ATOM", "12", "H1", "ETO", "A", "2"],
["ATOM", "13", "H2", "ETO", "A", "2"],
["ATOM", "14", "H3", "ETO", "A", "2"],
["ATOM", "15", "H4", "ETO", "A", "2"],
["ATOM", "16", "H5", "ETO", "A", "2"],
["ATOM", "17", "H6", "ETO", "A", "2"],
]
atom_type_res_part_2_list = [
["1.00", "1.00", "C"],
["1.00", "1.00", "C"],
["1.00", "1.00", "H"],
["1.00", "1.00", "H"],
["1.00", "1.00", "H"],
["1.00", "1.00", "H"],
["1.00", "1.00", "H"],
["1.00", "1.00", "H"],
["1.00", "2.00", "C"],
["1.00", "2.00", "C"],
["1.00", "2.00", "O"],
["1.00", "2.00", "H"],
["1.00", "2.00", "H"],
["1.00", "2.00", "H"],
["1.00", "2.00", "H"],
["1.00", "2.00", "H"],
["1.00", "2.00", "H"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert read_pdb_part_1
assert read_pdb_part_2
def test_charmm_pdb_fix_bonds_only(self, ethane_gomc, ethanol_gomc):
test_box_ethane_propane = mb.fill_box(
compound=[ethane_gomc, ethanol_gomc],
n_compounds=[1, 1],
box=[2.0, 2.0, 2.0],
)
charmm = Charmm(
test_box_ethane_propane,
"Test_fixes_bonds_only",
ff_filename="Test_fixes_bonds_only",
residues=[ethanol_gomc.name, ethane_gomc.name],
forcefield_selection="oplsaa",
gomc_fix_bonds=[ethane_gomc.name],
)
charmm.write_inp()
with open("Test_fixes_bonds_only.inp", "r") as fp:
bonds_read = False
angles_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["D", "G", "340.0", "1.09"],
["E", "G", "320.0", "1.41"],
["E", "F", "553.0", "0.945"],
["A", "C", "999999999999", "1.09"],
["B", "D", "340.0", "1.09"],
["A", "A", "999999999999", "1.529"],
["B", "G", "268.0", "1.529"],
]
total_bonds_evaluated = []
total_fixed_bonds = []
for j in range(0, 7):
total_bonds_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[2:3] == [
"999999999999"
]:
total_fixed_bonds.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert total_bonds_evaluated.sort() == bond_types.sort()
assert len(total_fixed_bonds) == 2
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line
):
angles_read = True
fixed_angle_types = []
total_angles_evaluated = []
total_fixed_angles = []
for j in range(0, 9):
if len(fixed_angle_types) > 0:
if out_gomc[i + 1 + j].split("!")[0].split()[
0:4
] == (fixed_angle_types[0] or fixed_angle_types[1]):
total_angles_evaluated.append(
out_gomc[i + 1 + j]
.split("!")[0]
.split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[3:4] == [
"999999999999"
]:
total_fixed_angles.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert (
fixed_angle_types.sort()
== total_angles_evaluated.sort()
)
assert len(total_fixed_angles) == len(fixed_angle_types)
else:
pass
assert bonds_read
assert angles_read
def test_charmm_pdb_fix_bonds_only_and_fix_bonds_angles(
self, ethane_gomc, ethanol_gomc
):
test_box_ethane_propane = mb.fill_box(
compound=[ethane_gomc, ethanol_gomc],
n_compounds=[1, 1],
box=[2.0, 2.0, 2.0],
)
charmm = Charmm(
test_box_ethane_propane,
"Test_fixes_bonds_only_and_fix_bonds_angles",
ff_filename="Test_fixes_bonds_only_and_fix_bonds_angles",
residues=[ethanol_gomc.name, ethane_gomc.name],
forcefield_selection="oplsaa",
gomc_fix_bonds=[ethane_gomc.name],
gomc_fix_bonds_angles=[ethane_gomc.name],
)
charmm.write_inp()
with open("Test_fixes_bonds_only_and_fix_bonds_angles.inp", "r") as fp:
bonds_read = False
angles_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["D", "G", "340.0", "1.09"],
["E", "G", "320.0", "1.41"],
["E", "F", "553.0", "0.945"],
["A", "C", "999999999999", "1.09"],
["B", "D", "340.0", "1.09"],
["A", "A", "999999999999", "1.529"],
["B", "G", "268.0", "1.529"],
]
total_bonds_evaluated = []
total_fixed_bonds = []
for j in range(0, 7):
total_bonds_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[2:3] == [
"999999999999"
]:
total_fixed_bonds.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert total_bonds_evaluated.sort() == bond_types.sort()
assert len(total_fixed_bonds) == 2
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line
):
angles_read = True
fixed_angle_types = [
["A", "A", "C", "999999999999", "110.70000"],
["C", "A", "C", "999999999999", "107.80000"],
]
total_angles_evaluated = []
total_fixed_angles = []
for j in range(0, 9):
if out_gomc[i + 1 + j].split("!")[0].split()[0:4] == (
fixed_angle_types[0] or fixed_angle_types[1]
):
total_angles_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[3:4] == [
"999999999999"
]:
total_fixed_angles.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert (
fixed_angle_types.sort()
== total_angles_evaluated.sort()
)
assert len(total_fixed_angles) == len(fixed_angle_types)
else:
pass
assert bonds_read
assert angles_read
def test_charmm_pdb_fix_angles_only(self, ethane_gomc, ethanol_gomc):
test_box_ethane_propane = mb.fill_box(
compound=[ethane_gomc, ethanol_gomc],
n_compounds=[1, 1],
box=[2.0, 2.0, 2.0],
)
charmm = Charmm(
test_box_ethane_propane,
"Test_fixes_angles_only",
ff_filename="Test_fixes_angles_only",
residues=[ethanol_gomc.name, ethane_gomc.name],
forcefield_selection="oplsaa",
gomc_fix_angles=[ethane_gomc.name],
)
charmm.write_inp()
with open("Test_fixes_angles_only.inp", "r") as fp:
bonds_read = False
angles_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["D", "G", "340.0", "1.09"],
["E", "G", "320.0", "1.41"],
["E", "F", "553.0", "0.945"],
["A", "C", "340.0", "1.09"],
["B", "D", "340.0", "1.09"],
["A", "A", "268.0", "1.529"],
["B", "G", "268.0", "1.529"],
]
total_bonds_evaluated = []
total_fixed_bonds = []
for j in range(0, 7):
total_bonds_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[2:3] == [
"999999999999"
]:
total_fixed_bonds.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert total_bonds_evaluated.sort() == bond_types.sort()
assert len(total_fixed_bonds) == 0
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line
):
angles_read = True
fixed_angle_types = [
["A", "A", "C", "999999999999", "110.70000"],
["C", "A", "C", "999999999999", "107.80000"],
]
total_angles_evaluated = []
total_fixed_angles = []
for j in range(0, 9):
if out_gomc[i + 1 + j].split("!")[0].split()[0:4] == (
fixed_angle_types[0] or fixed_angle_types[1]
):
total_angles_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[3:4] == [
"999999999999"
]:
total_fixed_angles.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert (
fixed_angle_types.sort()
== total_angles_evaluated.sort()
)
assert len(total_fixed_angles) == len(fixed_angle_types)
else:
pass
assert bonds_read
assert angles_read
def test_charmm_pdb_fix_angles_only_and_fix_bonds_angles(
self, ethane_gomc, ethanol_gomc
):
test_box_ethane_propane = mb.fill_box(
compound=[ethane_gomc, ethanol_gomc],
n_compounds=[1, 1],
box=[2.0, 2.0, 2.0],
)
charmm = Charmm(
test_box_ethane_propane,
"Test_fixes_angles_only_and_fix_bonds_angles",
ff_filename="Test_fixes_angles_only_and_fix_bonds_angles",
residues=[ethanol_gomc.name, ethane_gomc.name],
forcefield_selection="oplsaa",
gomc_fix_angles=[ethane_gomc.name],
gomc_fix_bonds_angles=[ethane_gomc.name],
)
charmm.write_inp()
with open("Test_fixes_angles_only_and_fix_bonds_angles.inp", "r") as fp:
bonds_read = False
angles_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if (
"!atom_types" in line
and "Kb" in line
and "b0" in line
and "atoms_types_per_utilized_FF" in line
):
bonds_read = True
bond_types = [
["D", "G", "340.0", "1.09"],
["E", "G", "320.0", "1.41"],
["E", "F", "553.0", "0.945"],
["A", "C", "999999999999", "1.09"],
["B", "D", "340.0", "1.09"],
["A", "A", "999999999999", "1.529"],
["B", "G", "268.0", "1.529"],
]
total_bonds_evaluated = []
total_fixed_bonds = []
for j in range(0, 7):
total_bonds_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[2:3] == [
"999999999999"
]:
total_fixed_bonds.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert total_bonds_evaluated.sort() == bond_types.sort()
assert len(total_fixed_bonds) == 2
elif (
"!atom_types" in line
and "Ktheta" in line
and "Theta0" in line
and "atoms_types_per_utilized_FF" in line
):
angles_read = True
fixed_angle_types = [
["A", "A", "C", "999999999999", "110.70000"],
["C", "A", "C", "999999999999", "107.80000"],
]
total_angles_evaluated = []
total_fixed_angles = []
for j in range(0, 9):
if out_gomc[i + 1 + j].split("!")[0].split()[0:4] == (
fixed_angle_types[0] or fixed_angle_types[1]
):
total_angles_evaluated.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
if out_gomc[i + 1 + j].split("!")[0].split()[3:4] == [
"999999999999"
]:
total_fixed_angles.append(
out_gomc[i + 1 + j].split("!")[0].split()[0:4]
)
assert (
fixed_angle_types.sort()
== total_angles_evaluated.sort()
)
assert len(total_fixed_angles) == len(fixed_angle_types)
else:
pass
assert bonds_read
assert angles_read
def test_charmm_pdb_no_differenc_1_4_coul_scalars(
self, two_propanol_ua, ethane_gomc
):
test_box_ethane_two_propanol_ua = mb.fill_box(
compound=[two_propanol_ua, ethane_gomc],
n_compounds=[1, 1],
box=[2.0, 2.0, 2.0],
)
with pytest.raises(
ValueError,
match=r"ERROR: There are multiple 1,4-coulombic scaling factors "
"GOMC will only accept a singular input for the 1,4-coulombic "
"scaling factors",
):
Charmm(
test_box_ethane_two_propanol_ua,
"residue_reorder_box_sizing_box_0",
structure_box_1=ethane_gomc,
filename_box_1="residue_reorder_box_sizing_box_1",
ff_filename="residue_reorder_box",
residues=[two_propanol_ua.name, ethane_gomc.name],
forcefield_selection={
two_propanol_ua.name: "trappe-ua",
ethane_gomc.name: "oplsaa",
},
fix_residue=None,
fix_residue_in_box=None,
gomc_fix_bonds_angles=None,
reorder_res_in_pdb_psf=False,
bead_to_atom_name_dict={"_CH3": "C"},
)
def test_charmm_pdb_residue_reorder_and_ff_filename_box_sizing(
self, ethanol_gomc, ethane_gomc
):
test_box_ethane_ethanol_gomc = mb.fill_box(
compound=[ethanol_gomc, ethane_gomc],
n_compounds=[1, 1],
box=[3, 3, 3],
)
test_box_ethane_gomc = mb.fill_box(
compound=[ethane_gomc], n_compounds=[1], box=[4, 4, 4]
)
charmm = Charmm(
test_box_ethane_ethanol_gomc,
"residue_reorder_box_sizing_box_0",
structure_box_1=test_box_ethane_gomc,
filename_box_1="residue_reorder_box_sizing_box_1",
ff_filename=None,
residues=[ethane_gomc.name, ethanol_gomc.name],
forcefield_selection=str(forcefields.get_ff_path()[0])
+ "/xml/"
+ "oplsaa.xml",
fix_residue=None,
fix_residue_in_box=None,
gomc_fix_bonds_angles=None,
reorder_res_in_pdb_psf=True,
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_pdb()
with open("residue_reorder_box_sizing_box_0.pdb", "r") as fp:
pdb_box_0_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_box_0_read = True
assert out_gomc[i].split()[0:7] == [
"CRYST1",
"30.000",
"30.000",
"30.000",
"90.00",
"90.00",
"90.00",
]
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "ETH", "A", "1"],
["ATOM", "2", "C2", "ETH", "A", "1"],
["ATOM", "3", "H1", "ETH", "A", "1"],
["ATOM", "4", "H2", "ETH", "A", "1"],
["ATOM", "5", "H3", "ETH", "A", "1"],
["ATOM", "6", "H4", "ETH", "A", "1"],
["ATOM", "7", "H5", "ETH", "A", "1"],
["ATOM", "8", "H6", "ETH", "A", "1"],
["ATOM", "9", "C1", "ETO", "A", "2"],
["ATOM", "10", "C2", "ETO", "A", "2"],
["ATOM", "11", "O1", "ETO", "A", "2"],
["ATOM", "12", "H1", "ETO", "A", "2"],
["ATOM", "13", "H2", "ETO", "A", "2"],
["ATOM", "14", "H3", "ETO", "A", "2"],
["ATOM", "15", "H4", "ETO", "A", "2"],
["ATOM", "16", "H5", "ETO", "A", "2"],
["ATOM", "17", "H6", "ETO", "A", "2"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "C"],
["1.00", "0.00", "C"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "C"],
["1.00", "0.00", "C"],
["1.00", "0.00", "O"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
["1.00", "0.00", "H"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert pdb_box_0_read
with open("residue_reorder_box_sizing_box_1.pdb", "r") as fp:
pdb_box_1_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_box_1_read = True
assert out_gomc[i].split()[0:7] == [
"CRYST1",
"40.000",
"40.000",
"40.000",
"90.00",
"90.00",
"90.00",
]
else:
pass
assert pdb_box_1_read
# test utils base 10 to base 16 converter
def test_base_10_to_base_16(self):
list_base_10_and_16 = [
[15, "f"],
[16, "10"],
[17, "11"],
[200, "c8"],
[1000, "3e8"],
[5000, "1388"],
[int(16 ** 3 - 1), "fff"],
[int(16 ** 3), "1000"],
]
for test_base_16_iter in range(0, len(list_base_10_and_16)):
test_10_iter = list_base_10_and_16[test_base_16_iter][0]
test_16_iter = list_base_10_and_16[test_base_16_iter][1]
assert str(base10_to_base16_alph_num(test_10_iter)) == str(
test_16_iter
)
unique_entries_base_16_list = []
for test_unique_base_16 in range(0, 16 ** 2):
unique_entries_base_16_list.append(
base10_to_base16_alph_num(test_unique_base_16)
)
verified_unique_entries_base_16_list = np.unique(
unique_entries_base_16_list
)
assert len(verified_unique_entries_base_16_list) == len(
unique_entries_base_16_list
)
add_same_values_list = ["1", "a"]
for add_same_base_16 in range(0, len(add_same_values_list)):
verified_unique_entries_base_16_list = np.append(
verified_unique_entries_base_16_list,
add_same_values_list[add_same_base_16],
)
assert len(verified_unique_entries_base_16_list) - len(
add_same_values_list
) == len(unique_entries_base_16_list)
# test utils base 10 to base 26 converter
def test_base_10_to_base_26(self):
list_base_10_and_26 = [
[0, "A"],
[5, "F"],
[25, "Z"],
[26, "BA"],
[200, "HS"],
[1000, "BMM"],
[5000, "HKI"],
[int(26 ** 3 - 1), "ZZZ"],
[int(26 ** 3), "BAAA"],
]
for test_base_26_iter in range(0, len(list_base_10_and_26)):
test_10_iter = list_base_10_and_26[test_base_26_iter][0]
test_26_iter = list_base_10_and_26[test_base_26_iter][1]
assert str(base10_to_base26_alph(test_10_iter)) == str(test_26_iter)
unique_entries_base_26_list = []
for test_unique_base_26 in range(0, 26 ** 2):
unique_entries_base_26_list.append(
base10_to_base26_alph(test_unique_base_26)
)
verified_unique_entries_base_26_list = np.unique(
unique_entries_base_26_list
)
assert len(verified_unique_entries_base_26_list) == len(
unique_entries_base_26_list
)
add_same_values_list = ["1", "a"]
for add_same_base_26 in range(0, len(add_same_values_list)):
verified_unique_entries_base_26_list = np.append(
verified_unique_entries_base_26_list,
add_same_values_list[add_same_base_26],
)
assert len(verified_unique_entries_base_26_list) - len(
add_same_values_list
) == len(unique_entries_base_26_list)
# test utils base 10 to base 52 converter
def test_base_10_to_base_52(self):
list_base_10_and_52 = [
[17, "R"],
[51, "z"],
[52, "BA"],
[53, "BB"],
[200, "Ds"],
[1000, "TM"],
[5000, "BsI"],
[int(52 ** 3 - 1), "zzz"],
[int(52 ** 3), "BAAA"],
]
for test_base_52_iter in range(0, len(list_base_10_and_52)):
test_10_iter = list_base_10_and_52[test_base_52_iter][0]
test_52_iter = list_base_10_and_52[test_base_52_iter][1]
assert str(base10_to_base52_alph(test_10_iter)) == str(test_52_iter)
unique_entries_base_52_list = []
for test_unique_base_52 in range(0, 52 ** 2):
unique_entries_base_52_list.append(
base10_to_base52_alph(test_unique_base_52)
)
verified_unique_entries_base_52_list = np.unique(
unique_entries_base_52_list
)
assert len(verified_unique_entries_base_52_list) == len(
unique_entries_base_52_list
)
add_same_values_list = ["1", "a"]
for add_same_base_52 in range(0, len(add_same_values_list)):
verified_unique_entries_base_52_list = np.append(
verified_unique_entries_base_52_list,
add_same_values_list[add_same_base_52],
)
assert len(verified_unique_entries_base_52_list) - len(
add_same_values_list
) == len(unique_entries_base_52_list)
# test utils base 10 to base 62 converter
def test_base_10_to_base_62(self):
list_base_10_and_62 = [
[17, "H"],
[61, "z"],
[62, "10"],
[63, "11"],
[200, "3E"],
[1000, "G8"],
[5000, "1Ie"],
[int(62 ** 3 - 1), "zzz"],
[int(62 ** 3), "1000"],
]
for test_base_62_iter in range(0, len(list_base_10_and_62)):
test_10_iter = list_base_10_and_62[test_base_62_iter][0]
test_62_iter = list_base_10_and_62[test_base_62_iter][1]
assert str(base10_to_base62_alph_num(test_10_iter)) == str(
test_62_iter
)
unique_entries_base_62_list = []
for test_unique_base_62 in range(0, 62 ** 2):
unique_entries_base_62_list.append(
base10_to_base62_alph_num(test_unique_base_62)
)
verified_unique_entries_base_62_list = np.unique(
unique_entries_base_62_list
)
assert len(verified_unique_entries_base_62_list) == len(
unique_entries_base_62_list
)
add_same_values_list = ["1", "a"]
for add_same_base_62 in range(0, len(add_same_values_list)):
verified_unique_entries_base_62_list = np.append(
verified_unique_entries_base_62_list,
add_same_values_list[add_same_base_62],
)
assert len(verified_unique_entries_base_62_list) - len(
add_same_values_list
) == len(unique_entries_base_62_list)
# Tests for the mbuild.utils.specific_FF_to_residue.Specific_FF_to_residue() function
def test_specific_ff_ff_is_none(self, ethane_gomc):
with pytest.raises(
TypeError,
match=r"Please the force field selection \(forcefield_selection\) as a "
r"dictionary with all the residues specified to a force field "
'-> Ex: {"Water" : "oplsaa", "OCT": "path/trappe-ua.xml"}, '
"Note: the file path must be specified the force field file "
"or by using the standard force field name provided the `foyer` package.",
):
specific_ff_to_residue(
ethane_gomc,
forcefield_selection=None,
residues=[ethane_gomc.name],
reorder_res_in_pdb_psf=False,
boxes_for_simulation=1,
)
def test_specific_ff_wrong_ff_extention(self, ethane_gomc):
with pytest.raises(
ValueError,
match=r"Please make sure you are entering the correct "
r"foyer FF name and not a path to a FF file. "
r"If you are entering a path to a FF file, "
r"please use the forcefield_files variable with the "
r"proper XML extension \(.xml\).",
):
specific_ff_to_residue(
ethane_gomc,
forcefield_selection={ethane_gomc.name: "oplsaa.pdb"},
residues=[ethane_gomc.name],
reorder_res_in_pdb_psf=False,
boxes_for_simulation=1,
)
def test_specific_all_residue_not_input(self, ethane_gomc, ethanol_gomc):
with pytest.raises(
ValueError,
match=r"All the residues are not specified, or the residues "
r"entered does not match the residues that were found "
r"and built for structure.",
):
box = mb.fill_box(
compound=[ethane_gomc, ethanol_gomc],
box=[1, 1, 1],
n_compounds=[1, 1],
)
specific_ff_to_residue(
box,
forcefield_selection={ethane_gomc.name: "oplsaa"},
residues=[ethane_gomc.name],
reorder_res_in_pdb_psf=False,
boxes_for_simulation=2,
)
    def test_specific_ff_to_residue_ff_selection_not_dict(self, ethane_gomc):
        """specific_ff_to_residue raises TypeError when forcefield_selection is a plain string instead of a dict."""
        with pytest.raises(
            TypeError,
            match=r"The force field selection \(forcefield_selection\) "
            "is not a dictionary. Please enter a dictionary "
            "with all the residues specified to a force field "
            '-> Ex: {"Water" : "oplsaa", "OCT": "path/trappe-ua.xml"}, '
            "Note: the file path must be specified the force field file "
            "or by using the standard force field name provided the `foyer` package.",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection="oplsaa",
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_residue_is_none(self, ethane_gomc):
        """specific_ff_to_residue raises TypeError when residues is None."""
        with pytest.raises(
            TypeError,
            match=r"Please enter the residues in the Specific_FF_to_residue function.",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
                residues=None,
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_residue_reorder_not_true_or_false(
        self, ethane_gomc
    ):
        """specific_ff_to_residue raises TypeError when reorder_res_in_pdb_psf is not a bool (None here)."""
        with pytest.raises(
            TypeError,
            match=r"Please enter the reorder_res_in_pdb_psf "
            r"in the Specific_FF_to_residue function \(i.e., True or False\).",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=None,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_simulation_boxes_not_1_or_2(self, ethane_gomc):
        """specific_ff_to_residue raises ValueError when boxes_for_simulation is an int other than 1 or 2."""
        with pytest.raises(
            ValueError,
            match=r"Please enter boxes_for_simulation equal the integer 1 or 2.",
        ):
            test_box_ethane_gomc = mb.fill_box(
                compound=[ethane_gomc], n_compounds=[1], box=[2, 3, 4]
            )
            specific_ff_to_residue(
                test_box_ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=3,
            )
    def test_specific_ff_to_residue_ffselection_wrong_path(self, ethane_gomc):
        """specific_ff_to_residue raises ValueError when an XML FF filename resolves to no valid file path (filled box input)."""
        with pytest.raises(
            ValueError,
            match=r"Please make sure you are entering the correct foyer FF path, "
            r"including the FF file name.xml "
            r"If you are using the pre-build FF files in foyer, "
            r"only use the string name without any extension.",
        ):
            test_box_ethane_gomc = mb.fill_box(
                compound=[ethane_gomc], n_compounds=[1], box=[4, 5, 6]
            )
            specific_ff_to_residue(
                test_box_ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa.xml"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_wrong_path(self, ethane_gomc):
        """specific_ff_to_residue raises ValueError when an XML FF filename resolves to no valid file path (bare compound input)."""
        with pytest.raises(
            ValueError,
            match=r"Please make sure you are entering the correct foyer FF path, "
            r"including the FF file name.xml "
            r"If you are using the pre-build FF files in foyer, "
            r"only use the string name without any extension.",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa.xml"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_residue_input_string_as_compound(self, ethane_gomc):
        """specific_ff_to_residue raises TypeError when the structure argument is a str instead of an mbuild Compound/Box."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: The structure expected to be of type: "
            r"<class 'mbuild.compound.Compound'> or <class 'mbuild.box.Box'>, "
            r"received: <class 'str'>",
        ):
            specific_ff_to_residue(
                "ethane_gomc",
                forcefield_selection={ethane_gomc.name: "oplsaa"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_residue_boxes_for_simulation_not_int(
        self, ethane_gomc
    ):
        """specific_ff_to_residue raises TypeError when boxes_for_simulation is a non-integer (1.1)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter boxes_for_simulation equal "
            "the integer 1 or 2.",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1.1,
            )
    def test_specific_ff_to_residues_no_ff(self, ethane_gomc):
        """specific_ff_to_residue raises ValueError when forcefield_selection is an empty dict but residues are given."""
        with pytest.raises(
            ValueError,
            match=r"The forcefield_selection variable are not provided, "
            r"but there are residues provided.",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_no_residues(self, ethane_gomc):
        """specific_ff_to_residue raises ValueError when residues is an empty list but a forcefield_selection is given."""
        with pytest.raises(
            ValueError,
            match=r"The residues variable is an empty list but there are "
            "forcefield_selection variables provided.",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
                residues=[],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_wrong_foyer_name(self, ethane_gomc):
        """specific_ff_to_residue raises ValueError for an unrecognized foyer force field name ("xxx")."""
        with pytest.raises(
            ValueError,
            match=r"Please make sure you are entering the correct foyer FF name, "
            r"or the correct file extension \(i.e., .xml, if required\).",
        ):
            specific_ff_to_residue(
                ethane_gomc,
                forcefield_selection={ethane_gomc.name: "xxx"},
                residues=[ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_specific_ff_to_residue_ffselection_run(self, ethane_gomc):
        """Successful specific_ff_to_residue run with a full FF XML path: check the 1-4 scaling dicts and residue list."""
        test_box_ethane_gomc = mb.fill_box(
            compound=[ethane_gomc], n_compounds=[1], box=[4, 5, 6]
        )
        # test_value_0 is the parametrized structure; it is unpacked but not
        # asserted here (its contents are checked in other tests).
        [
            test_value_0,
            test_value_1,
            test_value_2,
            test_value_3,
        ] = specific_ff_to_residue(
            test_box_ethane_gomc,
            forcefield_selection={
                ethane_gomc.name: forcefields.get_ff_path()[0]
                + "/xml/"
                + "oplsaa.xml"
            },
            residues=[ethane_gomc.name],
            reorder_res_in_pdb_psf=False,
            boxes_for_simulation=1,
        )
        # OPLS-AA uses 0.5 coulomb and 0.5 LJ 1-4 scaling for the ETH residue
        assert test_value_1 == {"ETH": 0.5}
        assert test_value_2 == {"ETH": 0.5}
        assert test_value_3 == ["ETH"]
    def test_specific_ff_to_no_atoms_in_residue(self):
        """specific_ff_to_residue on an empty Compound with an empty residues list raises ValueError.

        NOTE(review): the matched message is the empty-residues-list error, which
        presumably fires before any "no atoms" check implied by this test's name
        — confirm against specific_ff_to_residue's validation order.
        """
        with pytest.raises(
            ValueError,
            match=r"The residues variable is an empty list but there "
            r"are forcefield_selection variables provided.",
        ):
            empty_compound = mb.Compound()
            specific_ff_to_residue(
                empty_compound,
                forcefield_selection={"empty_compound": "oplsaa"},
                residues=[],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_charmm_methane_test_no_children(self, methane_ua_gomc):
        """specific_ff_to_residue raises TypeError for a Compound with no children (single-bead methane fixture)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: If you are not providing an empty box, "
            r"you need to specify the atoms/beads as children in the mb.Compound. "
            r"If you are providing and empty box, please do so by specifying and "
            r"mbuild Box \({}\)".format(type(Box(lengths=[1, 1, 1]))),
        ):
            specific_ff_to_residue(
                methane_ua_gomc,
                forcefield_selection={methane_ua_gomc.name: "trappe-ua"},
                residues=[methane_ua_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_charmm_a_few_mbuild_layers(self, ethane_gomc, ethanol_gomc):
        """specific_ff_to_residue handles nested mbuild containers (a box added inside another box)."""
        box_reservior_1 = mb.fill_box(
            compound=[ethane_gomc], box=[1, 1, 1], n_compounds=[1]
        )
        box_reservior_1.periodicity = (True, True, True)
        box_reservior_2 = mb.fill_box(
            compound=[ethanol_gomc], box=[1, 1, 1], n_compounds=[1]
        )
        # shift the second box so the two reserviors do not overlap, then nest it
        box_reservior_2.translate([0, 0, 1])
        box_reservior_1.add(box_reservior_2, inherit_periodicity=False)
        [
            test_value_0,
            test_value_1,
            test_value_2,
            test_value_3,
        ] = specific_ff_to_residue(
            box_reservior_1,
            forcefield_selection={
                ethanol_gomc.name: "oplsaa",
                ethane_gomc.name: "oplsaa",
            },
            residues=[ethanol_gomc.name, ethane_gomc.name],
            reorder_res_in_pdb_psf=False,
            boxes_for_simulation=1,
        )
        # the parametrized parmed Structure repr pins atom/residue/bond counts
        assert (
            str(test_value_0)
            == "<Structure 17 atoms; 2 residues; 15 bonds; PBC (orthogonal); parametrized>"
        )
        assert test_value_1 == {"ETO": 0.5, "ETH": 0.5}
        assert test_value_2 == {"ETO": 0.5, "ETH": 0.5}
        assert test_value_3 == ["ETH", "ETO"]
    def test_charmm_all_residues_not_in_dict(self, ethane_gomc, ethanol_gomc):
        """specific_ff_to_residue raises ValueError when a listed residue has no entry in forcefield_selection."""
        with pytest.raises(
            ValueError,
            match=r"All the residues were not used from the forcefield_selection "
            r"string or dictionary. There may be residues below other "
            r"specified residues in the mbuild.Compound hierarchy. "
            r"If so, all the highest listed residues pass down the force "
            r"fields through the hierarchy. Alternatively, residues that "
            r"are not in the structure may have been specified. ",
        ):
            # box contains only ethane, but ethanol is also listed as a residue
            box_reservior_1 = mb.fill_box(
                compound=[ethane_gomc], box=[1, 1, 1], n_compounds=[1]
            )
            specific_ff_to_residue(
                box_reservior_1,
                forcefield_selection={ethanol_gomc.name: "oplsaa"},
                residues=[ethanol_gomc.name, ethane_gomc.name],
                reorder_res_in_pdb_psf=False,
                boxes_for_simulation=1,
            )
    def test_charmm_correct_residue_format(self, ethane_gomc):
        """Charmm accepts a correctly formatted residues list (no input_error flagged)."""
        test_value = Charmm(
            ethane_gomc,
            "box_0",
            structure_box_1=None,
            filename_box_1=None,
            ff_filename=None,
            residues=[ethane_gomc.name],
            forcefield_selection={ethane_gomc.name: "oplsaa"},
        )
        assert test_value.input_error is False
    def test_charmm_residue_not_list(self, ethane_gomc):
        """Charmm raises TypeError when residues is a bare residue name instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the residues list \(residues\) in a list format.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename=None,
                residues=ethane_gomc.name,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_residue_string(self, ethane_gomc):
        """Charmm raises TypeError when residues is a literal string instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the residues list \(residues\) in a list format.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename=None,
                residues="ethane_gomc.name",
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_residue_is_none(self, ethane_gomc):
        """Charmm raises TypeError when residues is None."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the residues list \(residues\)",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename=None,
                residues=None,
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_filename_0_is_not_string(self, ethane_gomc):
        """Charmm raises TypeError when filename_box_0 is not a string (int given)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the filename_box_0 as a string.",
        ):
            Charmm(
                ethane_gomc,
                0,
                structure_box_1=None,
                filename_box_1=None,
                ff_filename=None,
                residues=[ethane_gomc.name],
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_filename_box_1_is_not_string(self, ethane_gomc):
        """Charmm raises TypeError when filename_box_1 is not a string (list given)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the filename_box_1 as a string.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=ethane_gomc,
                filename_box_1=["box_0"],
                ff_filename=None,
                residues=[ethane_gomc.name],
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_gomc_filename_not_string(self, ethane_gomc):
        """Charmm raises TypeError when ff_filename is not a string (int given)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter GOMC force field name \(ff_filename\) as a string.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename=0,
                residues=[ethane_gomc.name],
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_gomc_filename_ext_not_dot_inp(self, ethane_gomc):
        """Charmm raises ValueError when ff_filename has an extension other than .inp."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: Please enter GOMC force field name without an "
            "extention or the .inp extension.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename="box.test",
                residues=[ethane_gomc.name],
                forcefield_selection={ethane_gomc.name: "oplsaa"},
            )
    def test_charmm_ffselection_not_dict(self, ethane_gomc):
        """Charmm raises TypeError when forcefield_selection is a list (not a string or dict)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: The force field selection \(forcefield_selection\) "
            "is not a string or a dictionary with all the residues specified "
            'to a force field. -> String Ex: "path/trappe-ua.xml" or Ex: "trappe-ua" '
            "Otherise provided a dictionary with all the residues specified "
            "to a force field "
            '->Dictionary Ex: {"Water" : "oplsaa", "OCT": "path/trappe-ua.xml"}, '
            "Note: the file path must be specified the force field file if "
            "a standard foyer force field is not used.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename="box_0",
                residues=[ethane_gomc.name],
                forcefield_selection=["oplsaa", "oplsaa"],
            )
    def test_charmm_ffselection_string(self, ethane_gomc):
        """Charmm accepts a single FF name string for forcefield_selection (no input_error flagged)."""
        test_value = Charmm(
            ethane_gomc,
            "box_0",
            structure_box_1=None,
            filename_box_1=None,
            ff_filename="box_0",
            residues=[ethane_gomc.name],
            forcefield_selection="oplsaa",
        )
        assert test_value.input_error is False
    def test_charmm_residue_name_not_in_residues(self, ethane_gomc):
        """Charmm raises ValueError when the residues list names a residue ("XXX") not found in the structure."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: All the residues are not specified, or "
            "the residues entered does not match the residues that "
            "were found and built for structure.",
        ):
            Charmm(
                ethane_gomc,
                "box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename="box_0",
                residues=["XXX"],
                forcefield_selection="oplsaa",
            )
def test_ffselection_string(self, two_propanol_ua):
charmm = Charmm(
two_propanol_ua,
"ffselection_string",
ff_filename="ffselection_string",
residues=[two_propanol_ua.name],
forcefield_selection=forcefields.get_ff_path()[0]
+ "/xml/"
+ "trappe-ua.xml",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_pdb()
with open("ffselection_string.pdb", "r") as fp:
pdb_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_read = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "POL", "A", "1"],
["ATOM", "2", "BD1", "POL", "A", "1"],
["ATOM", "3", "O1", "POL", "A", "1"],
["ATOM", "4", "H1", "POL", "A", "1"],
["ATOM", "5", "C2", "POL", "A", "1"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "EP"],
["1.00", "0.00", "EP"],
["1.00", "0.00", "O"],
["1.00", "0.00", "H"],
["1.00", "0.00", "EP"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert pdb_read
    def test_ff_selection_list(self, two_propanol_ua):
        """Charmm raises TypeError when forcefield_selection is a one-element list of a FF XML path."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: The force field selection \(forcefield_selection\) "
            "is not a string or a dictionary with all the residues specified "
            'to a force field. -> String Ex: "path/trappe-ua.xml" or Ex: "trappe-ua" '
            "Otherise provided a dictionary with all the residues specified "
            "to a force field "
            '->Dictionary Ex: {"Water" : "oplsaa", "OCT": "path/trappe-ua.xml"}, '
            "Note: the file path must be specified the force field file if "
            "a standard foyer force field is not used.",
        ):
            Charmm(
                two_propanol_ua,
                "S",
                ff_filename="S",
                residues=[two_propanol_ua.name],
                forcefield_selection=[
                    str(forcefields.get_ff_path()[0])
                    + "/xml/"
                    + "trappe-ua.xml"
                ],
                bead_to_atom_name_dict={"_CH3": "C"},
            )
    def test_residues_not_a_string(self, two_propanol_ua):
        """Charmm raises TypeError when the residues list contains a non-string value (int given)."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter a residues list "
            r"\(residues\) with only string values.",
        ):
            Charmm(
                two_propanol_ua,
                "box_0",
                ff_filename="box_0",
                residues=[2],
                forcefield_selection={two_propanol_ua.name: "trappe-ua"},
                bead_to_atom_name_dict={"_CH3": "C"},
            )
# charmm writer sub-function testing
    def test_charmm_bond_reorder_angle_urey_bradleys(
        self, two_propanol_gomc, ethanol_gomc
    ):
        """Check that _get_bond_types is insensitive to bond atom order, and that
        _get_angle_types returns (None, None) when urey_bradleys is requested."""
        box_reservior_0 = mb.fill_box(
            compound=[two_propanol_gomc, ethanol_gomc],
            box=[2, 2, 2],
            n_compounds=[2, 2],
        )
        [
            structure_ff,
            coulomb14scalar_dict,
            lj14_scalar_dict,
            residues_applied_list,
        ] = specific_ff_to_residue(
            box_reservior_0,
            forcefield_selection={
                two_propanol_gomc.name: "oplsaa",
                ethanol_gomc.name: "oplsaa",
            },
            residues=[ethanol_gomc.name, two_propanol_gomc.name],
            reorder_res_in_pdb_psf=False,
            boxes_for_simulation=1,
        )
        sigma_conversion_factor = 1
        epsilon_conversion_factor = 1
        # build the bond list in both atom orders; the writer should produce
        # identical bond types regardless of the order within each bond
        bonds_1 = [
            [bond.atom1.idx + 1, bond.atom2.idx + 1]
            for bond in structure_ff.bonds
        ]
        bond_types_1, unique_bond_types_1 = charmm_writer._get_bond_types(
            structure_ff, sigma_conversion_factor, epsilon_conversion_factor
        )
        bonds_2 = [
            [bond.atom2.idx + 1, bond.atom1.idx + 1]
            for bond in structure_ff.bonds
        ]
        bond_types_2, unique_bond_types_2 = charmm_writer._get_bond_types(
            structure_ff, sigma_conversion_factor, epsilon_conversion_factor
        )
        assert bonds_1 != bonds_2
        assert bond_types_1 == bond_types_2
        assert unique_bond_types_1 == unique_bond_types_2
        # test for error if trying to use urey_bradleys in the angles
        use_urey_bradleys = True
        angle_types_1, unique_angle_types_1 = charmm_writer._get_angle_types(
            structure_ff,
            sigma_conversion_factor,
            epsilon_conversion_factor,
            use_urey_bradleys=use_urey_bradleys,
        )
        assert angle_types_1 is None
        assert unique_angle_types_1 is None
    # test for error if trying to use use_dihedrals and impropers in the dihedrals (i.e. only RB torsion allowed)
def test_charmm_dihedral_reorder(self, ethyl_ether_gomc, methyl_ether_gomc):
box_reservior_0 = mb.fill_box(
compound=[ethyl_ether_gomc, methyl_ether_gomc],
box=[10, 10, 10],
n_compounds=[10, 10],
)
[
structure_ff,
coulomb14scalar_dict,
lj14_scalar_dict,
residues_applied_list,
] = specific_ff_to_residue(
box_reservior_0,
forcefield_selection={
ethyl_ether_gomc.name: "oplsaa",
methyl_ether_gomc.name: "oplsaa",
},
residues=[ethyl_ether_gomc.name, methyl_ether_gomc.name],
reorder_res_in_pdb_psf=False,
boxes_for_simulation=1,
)
use_rb_torsions_1 = False
use_dihedrals_1 = True
epsilon_conversion_factor = 1
lj_unit = 1 / epsilon_conversion_factor
(
dihedral_types_1,
unique_dihedral_types_1,
) = charmm_writer._get_dihedral_types(
structure_ff,
use_rb_torsions_1,
use_dihedrals_1,
epsilon_conversion_factor,
)
assert dihedral_types_1 is None
assert unique_dihedral_types_1 is None
use_rb_torsions_2 = True
use_dihedrals_2 = False
(
dihedral_types_2,
unique_dihedral_types_2,
) = charmm_writer._get_dihedral_types(
structure_ff,
use_rb_torsions_2,
use_dihedrals_2,
epsilon_conversion_factor,
)
unique_dih_typ_unsorted_2 = dict(
enumerate(
set(
[
(
round(dihedral.type.c0 * lj_unit, 3),
round(dihedral.type.c1 * lj_unit, 3),
round(dihedral.type.c2 * lj_unit, 3),
round(dihedral.type.c3 * lj_unit, 3),
round(dihedral.type.c4 * lj_unit, 3),
round(dihedral.type.c5 * lj_unit, 3),
round(dihedral.type.scee, 1),
round(dihedral.type.scnb, 1),
dihedral.atom1.type,
dihedral.atom2.type,
dihedral.atom3.type,
dihedral.atom4.type,
dihedral.atom1.residue.name,
dihedral.atom2.residue.name,
dihedral.atom3.residue.name,
dihedral.atom4.residue.name,
)
for dihedral in structure_ff.rb_torsions
]
)
)
)
unique_dih_typ_unsorted_2 = OrderedDict(
[(y, x + 1) for x, y in unique_dih_typ_unsorted_2.items()]
)
assert len(unique_dih_typ_unsorted_2) == 7
assert len(unique_dihedral_types_2) == 5
# test for error if trying to use impropers in the dihedrals (currently impropers are found but not used in
# the output)
# ******** NOTE*************************
# ******** NOTE*************************
# These impropers are blank and will need filled in upon adding the improper functionallity.
# They are kept in the code to identify if there are any impropers in the system and count them
# ******** NOTE*************************
# ******** NOTE*************************
# ******** NOTE*************************
(
improper_types_1,
unique_improper_types_1,
) = charmm_writer._get_impropers(
structure_ff, epsilon_conversion_factor
)
assert str(improper_types_1) == "[]"
assert str(unique_improper_types_1) == "OrderedDict()"
def test_charmm_angle_reorder(self, ethyl_ether_gomc, methyl_ether_gomc):
box_reservior_0 = mb.fill_box(
compound=[ethyl_ether_gomc, methyl_ether_gomc],
box=[10, 10, 10],
n_compounds=[10, 10],
)
[
structure_ff,
coulomb14scalar_dict,
lj14_scalar_dict,
residues_applied_list,
] = specific_ff_to_residue(
box_reservior_0,
forcefield_selection={
ethyl_ether_gomc.name: "oplsaa",
methyl_ether_gomc.name: "oplsaa",
},
residues=[ethyl_ether_gomc.name, methyl_ether_gomc.name],
reorder_res_in_pdb_psf=False,
boxes_for_simulation=1,
)
sigma_conversion_factor = 1
epsilon_conversion_factor = 1
use_urey_bradleys = False
angle_types_1, unique_angle_types_1 = charmm_writer._get_angle_types(
structure_ff,
sigma_conversion_factor,
epsilon_conversion_factor,
use_urey_bradleys,
)
# note this sorts all the possible combinations, so this should be the same as the double check (i.e, both 10)
unique_angle_types_1_unsorted = dict(
enumerate(
set(
[
(
round(
angle.type.k
* (
sigma_conversion_factor ** 2
/ epsilon_conversion_factor
),
3,
),
round(angle.type.theteq, 3),
angle.atom2.type,
tuple(sorted((angle.atom1.type, angle.atom3.type))),
angle.atom1.residue.name,
angle.atom2.residue.name,
angle.atom3.residue.name,
)
for angle in structure_ff.angles
]
)
)
)
unique_angle_types_1_unsorted = OrderedDict(
[(y, x + 1) for x, y in unique_angle_types_1_unsorted.items()]
)
assert len(unique_angle_types_1_unsorted) == 10
assert len(unique_angle_types_1) == 10
    def test_bead_atomname_equal_3(self, two_propanol_ua):
        """unique_atom_naming fails (ValueError) when bead_to_atom_name_dict maps to names too long to keep atom names unique."""
        # testing def unique_atom_naming in charmm_writer, expecting when failing
        with pytest.raises(
            ValueError,
            match=r"ERROR: The unique_atom_naming function failed while "
            "running the charmm_writer function. Ensure the proper inputs are "
            "in the bead_to_atom_name_dict.",
        ):
            box_reservior_0 = mb.fill_box(
                compound=[two_propanol_ua], box=[10, 10, 10], n_compounds=[10]
            )
            value_0 = Charmm(
                box_reservior_0,
                "test_bead_atomname_equal_3",
                ff_filename="test_bead_atomname_equal_3",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "Cx", "_HC": "Cxx"},
            )
            # the error surfaces during file writing, not construction
            value_0.write_inp()
            value_0.write_pdb()
            value_0.write_psf()
    def test_gomc_fix_bonds_angles_string(self, two_propanol_ua):
        """Charmm raises TypeError when gomc_fix_bonds_angles is a string instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please ensure the residue names in the \({}\) variable "
            r"are in a list.".format("gomc_fix_bonds_angles"),
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                gomc_fix_bonds_angles="two_propanol_ua.name",
            )
    def test_gomc_fix_bonds_angles_residue_not_in_system(self, two_propanol_ua):
        """Charmm raises ValueError when gomc_fix_bonds_angles names a residue not in the residues list."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: Please ensure that all the residue names in the "
            r"{} list are also in the residues list.".format(
                "gomc_fix_bonds_angles"
            ),
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                gomc_fix_bonds_angles=["WNG"],
            )
    def test_gomc_fix_bonds_string(self, two_propanol_ua):
        """Charmm raises TypeError when gomc_fix_bonds is a string instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please ensure the residue names in the \({}\) variable "
            r"are in a list.".format("gomc_fix_bonds"),
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                gomc_fix_bonds="two_propanol_ua.name",
            )
    def test_gomc_fix_bonds_residue_not_in_system(self, two_propanol_ua):
        """Charmm raises ValueError when gomc_fix_bonds names a residue not in the residues list."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: Please ensure that all the residue names in the "
            r"{} list are also in the residues list.".format("gomc_fix_bonds"),
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                gomc_fix_bonds=["WNG"],
            )
    def test_gomc_fix_angles_string(self, two_propanol_ua):
        """Charmm raises TypeError when gomc_fix_angles is a string instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please ensure the residue names in the \({}\) variable "
            r"are in a list.".format("gomc_fix_angles"),
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                gomc_fix_angles="two_propanol_ua.name",
            )
    def test_gomc_fix_angles_residue_not_in_system(self, two_propanol_ua):
        """Charmm raises ValueError when gomc_fix_angles names a residue not in the residues list."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: Please ensure that all the residue names in the "
            r"{} list are also in the residues list.".format("gomc_fix_angles"),
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                gomc_fix_angles=["WNG"],
            )
    def test_fix_residue_string(self, two_propanol_ua):
        """Charmm raises TypeError when fix_residue is a string instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the fix_residue in a list format",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                fix_residue="two_propanol_ua.name",
            )
    def test_fix_residue_string_residue_not_in_system(self, two_propanol_ua):
        """Charmm raises ValueError when fix_residue names a residue not in the residues list."""
        with pytest.raises(
            ValueError,
            match=r"Error: Please ensure that all the residue names in the fix_residue "
            r"list are also in the residues list.",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                fix_residue=["WNG"],
            )
    def test_fix_residue_in_box_string(self, two_propanol_ua):
        """Charmm raises TypeError when fix_residue_in_box is a string instead of a list."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the fix_residue_in_box in a list format.",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                fix_residue_in_box="two_propanol_ua.name",
            )
    def test_fix_residue_in_box_string_residue_not_in_system(
        self, two_propanol_ua
    ):
        """Charmm raises ValueError when fix_residue_in_box names a residue not in the residues list."""
        with pytest.raises(
            ValueError,
            match=r"Error: Please ensure that all the residue names in the "
            r"fix_residue_in_box list are also in the residues list.",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": "C"},
                fix_residue_in_box=["WNG"],
            )
    def test_bead_to_atom_name_dict_list(self, two_propanol_ua):
        """Charmm raises TypeError when bead_to_atom_name_dict is a list instead of a dict."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the a bead type to atom in the dictionary "
            r"\(bead_to_atom_name_dict\) so GOMC can properly evaluate the "
            r"unique atom names",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict=["_CH3", "C"],
            )
    def test_bead_to_atom_name_dict_not_string_0(self, two_propanol_ua):
        """Charmm raises TypeError when a bead_to_atom_name_dict value is not a string."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the bead_to_atom_name_dict with only "
            r"string inputs.",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={"_CH3": 0},
            )
    def test_bead_to_atom_name_dict_not_string_1(self, two_propanol_ua):
        """Charmm raises TypeError when a bead_to_atom_name_dict key is not a string."""
        with pytest.raises(
            TypeError,
            match=r"ERROR: Please enter the bead_to_atom_name_dict with only "
            r"string inputs.",
        ):
            Charmm(
                two_propanol_ua,
                "charmm_data_UA",
                ff_filename="charmm_data_UA",
                residues=[two_propanol_ua.name],
                forcefield_selection="trappe-ua",
                bead_to_atom_name_dict={0: "C"},
            )
    def test_1_box_residues_not_all_listed_box_0(
        self, ethane_gomc, ethanol_gomc
    ):
        """Charmm (1 box) raises ValueError when the residues list names a different residue than box 0 contains."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: All the residues are not specified, or the residues "
            r"entered does not match the residues that were found and "
            r"built for structure.",
        ):
            Charmm(
                ethane_gomc,
                "charmm_data_box_0",
                structure_box_1=None,
                filename_box_1=None,
                ff_filename="charmm_data",
                residues=[ethanol_gomc.name],
                forcefield_selection="oplsaa",
            )
    def test_2_box_residues_not_all_listed_box_0(
        self, ethane_gomc, ethanol_gomc
    ):
        """Charmm (2 boxes) raises ValueError when the residues list omits the box-0 residue (ethane)."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: All the residues are not specified, or the residues "
            r"entered does not match the residues that were found and "
            r"built for structure.",
        ):
            Charmm(
                ethane_gomc,
                "charmm_data_box_0",
                structure_box_1=ethanol_gomc,
                filename_box_1="charmm_data_box_1",
                ff_filename="charmm_data",
                residues=["XXX", ethanol_gomc.name],
                forcefield_selection="oplsaa",
            )
    def test_2_box_residues_not_all_listed_box_1(
        self, ethane_gomc, ethanol_gomc
    ):
        """Charmm (2 boxes) raises ValueError when the residues list omits the box-1 residue (ethanol)."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: All the residues are not specified, or the residues "
            r"entered does not match the residues that were found and "
            r"built for structure.",
        ):
            Charmm(
                ethane_gomc,
                "charmm_data_box_0",
                structure_box_1=ethanol_gomc,
                filename_box_1="charmm_data_box_1",
                ff_filename="charmm_data",
                residues=["XXX", ethane_gomc.name],
                forcefield_selection="oplsaa",
            )
    def test_2_box_residues_listed_2x(self, ethane_gomc, ethanol_gomc):
        """Charmm raises ValueError when the residues list contains duplicate names."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: Please enter the residues list \(residues\) that has "
            r"only unique residue names.",
        ):
            Charmm(
                ethane_gomc,
                "charmm_data_box_0",
                structure_box_1=ethanol_gomc,
                filename_box_1="charmm_data_box_1",
                ff_filename="charmm_data",
                residues=[ethanol_gomc.name, ethanol_gomc.name],
                forcefield_selection="oplsaa",
            )
    def test_all_residues_are_listed(self, ethane_gomc, ethanol_gomc):
        """Charmm (2 boxes) raises ValueError when only one of the two boxes' residues is listed."""
        with pytest.raises(
            ValueError,
            match=r"ERROR: All the residues are not specified, or the residues "
            r"entered does not match the residues that were found and "
            r"built for structure.",
        ):
            Charmm(
                ethane_gomc,
                "charmm_data_box_0",
                structure_box_1=ethanol_gomc,
                filename_box_1="charmm_data_box_1",
                ff_filename="charmm_data",
                residues=[ethanol_gomc.name],
                forcefield_selection="oplsaa",
            )
# Test that an empty box (psf and pdb files) can be created to start a simulation
def test_box_1_empty_test_1(self, two_propanol_ua):
empty_compound = Box(lengths=[2, 2, 2])
charmm = Charmm(
two_propanol_ua,
"charmm_filled_box",
structure_box_1=empty_compound,
filename_box_1="charmm_empty_box",
ff_filename="charmm_empty_box.inp",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_pdb()
charmm.write_psf()
with open("charmm_empty_box.pdb", "r") as fp:
pdb_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_read = True
assert out_gomc[i].split()[0:7] == [
"CRYST1",
"20.000",
"20.000",
"20.000",
"90.00",
"90.00",
"90.00",
]
assert out_gomc[i + 1].split() == ["END"]
else:
pass
assert pdb_read
with open("charmm_filled_box.pdb", "r") as fp:
pdb_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_read = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "POL", "A", "1"],
["ATOM", "2", "BD1", "POL", "A", "1"],
["ATOM", "3", "O1", "POL", "A", "1"],
["ATOM", "4", "H1", "POL", "A", "1"],
["ATOM", "5", "C2", "POL", "A", "1"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "EP"],
["1.00", "0.00", "EP"],
["1.00", "0.00", "O"],
["1.00", "0.00", "H"],
["1.00", "0.00", "EP"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert pdb_read
def test_box_1_empty_test_2(self, two_propanol_ua):
empty_compound = Box(lengths=[3, 3, 3], angles=[90, 90, 90])
charmm = Charmm(
two_propanol_ua,
"charmm_filled_box",
structure_box_1=empty_compound,
filename_box_1="charmm_empty_box",
ff_filename="charmm_empty_box.inp",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_pdb()
charmm.write_psf()
with open("charmm_empty_box.pdb", "r") as fp:
pdb_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_read = True
assert out_gomc[i].split()[0:7] == [
"CRYST1",
"30.000",
"30.000",
"30.000",
"90.00",
"90.00",
"90.00",
]
assert out_gomc[i + 1].split() == ["END"]
else:
pass
assert pdb_read
with open("charmm_filled_box.pdb", "r") as fp:
pdb_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_read = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "POL", "A", "1"],
["ATOM", "2", "BD1", "POL", "A", "1"],
["ATOM", "3", "O1", "POL", "A", "1"],
["ATOM", "4", "H1", "POL", "A", "1"],
["ATOM", "5", "C2", "POL", "A", "1"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "EP"],
["1.00", "0.00", "EP"],
["1.00", "0.00", "O"],
["1.00", "0.00", "H"],
["1.00", "0.00", "EP"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert pdb_read
def test_box_1_empty_test_3(self, two_propanol_ua):
empty_compound = Box(lengths=[4, 5, 6])
test_box_two_propanol_ua_gomc = mb.fill_box(
compound=[two_propanol_ua], n_compounds=[1], box=[3, 4, 5]
)
charmm = Charmm(
empty_compound,
"charmm_empty_box",
structure_box_1=test_box_two_propanol_ua_gomc,
filename_box_1="charmm_filled_box",
ff_filename="charmm_empty_box",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
charmm.write_pdb()
charmm.write_psf()
with open("charmm_empty_box.pdb", "r") as fp:
pdb_part_1_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_part_1_read = True
assert out_gomc[i].split()[0:7] == [
"CRYST1",
"40.000",
"50.000",
"60.000",
"90.00",
"90.00",
"90.00",
]
assert out_gomc[i + 1].split() == ["END"]
else:
pass
assert pdb_part_1_read
with open("charmm_filled_box.pdb", "r") as fp:
pdb_part_2_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "CRYST1" in line:
pdb_part_2_read = True
atom_type_res_part_1_list = [
["ATOM", "1", "C1", "POL", "A", "1"],
["ATOM", "2", "BD1", "POL", "A", "1"],
["ATOM", "3", "O1", "POL", "A", "1"],
["ATOM", "4", "H1", "POL", "A", "1"],
["ATOM", "5", "C2", "POL", "A", "1"],
]
atom_type_res_part_2_list = [
["1.00", "0.00", "EP"],
["1.00", "0.00", "EP"],
["1.00", "0.00", "O"],
["1.00", "0.00", "H"],
["1.00", "0.00", "EP"],
]
for j in range(0, len(atom_type_res_part_1_list)):
assert (
out_gomc[i + 1 + j].split()[0:6]
== atom_type_res_part_1_list[j]
)
assert (
out_gomc[i + 1 + j].split()[9:12]
== atom_type_res_part_2_list[j]
)
else:
pass
assert pdb_part_2_read
    def test_box_1_empty_test_4(self):
        # Two empty mbuild Boxes (no Compound with atoms anywhere) must
        # make the Charmm constructor raise a TypeError.
        empty_compound_box_0 = Box(lengths=[2, 2, 2])
        empty_compound_box_1 = Box(lengths=[3, 3, 3])
        # NOTE(review): the message says "structure_box_0 and structure_box_0"
        # (box_0 twice) -- presumably mirroring the exact production error
        # string; confirm against the Charmm writer before changing either side.
        with pytest.raises(
            TypeError,
            match=r"ERROR: Both structure_box_0 and structure_box_0 are empty Boxes {}. "
            "At least 1 structure must be an mbuild compound {} with 1 "
            "or more atoms in it".format(
                type(Box(lengths=[1, 1, 1])), type(Compound())
            ),
        ):
            Charmm(
                empty_compound_box_0,
                "charmm_data_box_0",
                structure_box_1=empty_compound_box_1,
                filename_box_1="charmm_data_box_1",
                ff_filename="charmm_data",
                residues=[],
                forcefield_selection="oplsaa",
            )
def test_box_1_empty_test_5(self):
empty_compound_box_0 = Box(lengths=[2, 2, 2])
with pytest.raises(
TypeError,
match=r"ERROR: Only 1 structure is provided and it can not be an empty "
r"mbuild Box {}. "
"it must be an mbuild compound {} with at least 1 "
"or more atoms in it.".format(
type(Box(lengths=[1, 1, 1])), type(Compound())
),
):
Charmm(
empty_compound_box_0,
"charmm_data_box_0",
structure_box_1=None,
filename_box_1=None,
ff_filename="charmm_data",
residues=[],
forcefield_selection="oplsaa",
)
def test_box_1_empty_test_6(self, two_propanol_ua):
with pytest.raises(
TypeError,
match=r"ERROR: If you are not providing an empty box, "
r"you need to specify the atoms/beads as children in the mb.Compound. "
r"If you are providing and empty box, please do so by specifying and "
r"mbuild Box \({}\)".format(type(Box(lengths=[1, 1, 1]))),
):
test_box_two_propanol_ua_gomc = mb.fill_box(
compound=[two_propanol_ua], n_compounds=[1], box=[3, 4, 5]
)
empty_compound = mb.Compound()
Charmm(
empty_compound,
"charmm_empty_box",
structure_box_1=test_box_two_propanol_ua_gomc,
filename_box_1="charmm_filled_box",
ff_filename="charmm_empty_box",
residues=[two_propanol_ua.name],
forcefield_selection="trappe-ua",
bead_to_atom_name_dict={"_CH3": "C"},
)
def test_structure_box_0_not_mb_compound(self, ethane_gomc):
with pytest.raises(
TypeError,
match=r"ERROR: The structure_box_0 expected to be of type: "
r"{} or {}, received: {}".format(
type(Compound()),
type(Box(lengths=[1, 1, 1])),
type("ethane_gomc"),
),
):
Charmm(
"ethane_gomc",
"charmm_data_box_0",
structure_box_1=ethane_gomc,
filename_box_1="charmm_data_box_1",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
)
def test_structure_box_1_not_mb_compound(self, ethane_gomc):
with pytest.raises(
TypeError,
match=r"ERROR: The structure_box_1 expected to be of type: "
"{} or {}, received: {}".format(
type(Compound()), type(Box(lengths=[1, 1, 1])), type(0)
),
):
Charmm(
ethane_gomc,
"charmm_data_box_0",
structure_box_1=0,
filename_box_1="charmm_data_box_1",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
)
def test_ff_dict_not_entered(self, ethane_gomc):
with pytest.raises(
TypeError,
match=r"ERROR: Please enter the forcefield_selection as it was not provided.",
):
Charmm(
ethane_gomc,
"charmm_data_box_0",
structure_box_1=ethane_gomc,
filename_box_1="charmm_data_box_1",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection=None,
)
def test_mie_non_bonded_type(self, ethane_gomc):
with pytest.raises(
ValueError,
match=r"ERROR: Currently the Mie potential \(non_bonded_type\) is not "
r"supported in this MoSDeF GOMC parameter writer.",
):
charmm = Charmm(
ethane_gomc,
"charmm_data_box_0",
structure_box_1=ethane_gomc,
filename_box_1="charmm_data_box_1",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
non_bonded_type="Mie",
)
charmm.write_inp()
def test_other_non_bonded_type(self, ethane_gomc):
with pytest.raises(
ValueError,
match=r"ERROR: Currently this potential \(non_bonded_type\) is not "
r"supported in this MoSDeF GOMC parameter writer.",
):
charmm = Charmm(
ethane_gomc,
"charmm_data_box_0",
structure_box_1=ethane_gomc,
filename_box_1="charmm_data_box_1",
ff_filename="charmm_data",
residues=[ethane_gomc.name],
forcefield_selection="oplsaa",
non_bonded_type="XXX",
)
charmm.write_inp()
def test_diff_1_4_coul_scalars(self, ethane_gomc, two_propanol_ua):
with pytest.raises(
ValueError,
match=r"ERROR: There are multiple 1,4-coulombic scaling factors "
"GOMC will only accept a singular input for the 1,4-coulombic "
"scaling factors.",
):
Charmm(
ethane_gomc,
"charmm_data_box_0",
structure_box_1=two_propanol_ua,
filename_box_1="charmm_data_box_1",
ff_filename="charmm_data",
residues=[ethane_gomc.name, two_propanol_ua.name],
forcefield_selection={
ethane_gomc.name: "oplsaa",
two_propanol_ua.name: "trappe-ua",
},
)
def test_write_inp_wo_ff_filename(self, ethane_gomc):
with pytest.raises(
TypeError,
match=r"ERROR: The force field file name was not specified and in the "
r"Charmm object. "
r"Therefore, the force field file \(.inp\) can not be written. "
r"Please use the force field file name when building the Charmm object, "
r"then use the write_inp function.",
):
charmm = Charmm(
ethane_gomc,
"charmm_data_box_0",
structure_box_1=ethane_gomc,
filename_box_1="charmm_data_box_1",
ff_filename=None,
forcefield_selection="oplsaa",
residues=[ethane_gomc.name],
)
charmm.write_inp()
    def test_write_inp_with_2_boxes(self, ethane_gomc):
        """Write a force field (.inp) file for a two-box ethane system and
        verify the atom-type/mass section lists the expected types."""
        charmm = Charmm(
            ethane_gomc,
            "charmm_data_box_0",
            structure_box_1=ethane_gomc,
            filename_box_1="charmm_data_box_1",
            ff_filename="charmm_data",
            residues=[ethane_gomc.name],
            forcefield_selection="oplsaa",
        )
        charmm.write_inp()
        with open("charmm_data.inp", "r") as fp:
            masses_read = False
            out_gomc = fp.readlines()
            for i, line in enumerate(out_gomc):
                # Locate the header of the atom-types/masses section by its
                # four identifying substrings.
                if (
                    "!atom_types" in line
                    and "mass" in line
                    and "atomTypeForceFieldName_ResidueName" in line
                    and "(i.e., atoms_type_per_utilized_FF)" in line
                ):
                    masses_read = True
                    # Expected pre-comment fields: "*", type letter, mass.
                    mass_type_1 = [
                        ["*", "A", "12.010780"],
                        ["*", "B", "1.007947"],
                    ]
                    # Expected FF atom-type names from the trailing comment.
                    mass_type_2 = [["opls_135_ETH"], ["opls_140_ETH"]]
                    for j in range(0, len(mass_type_1)):
                        # Everything before "!" is the data portion; it must
                        # have exactly 3 whitespace-separated fields.
                        assert (
                            len(out_gomc[i + 1 + j].split("!")[0].split()) == 3
                        )
                        assert (
                            out_gomc[i + 1 + j].split("!")[0].split()[0:3]
                            == mass_type_1[j]
                        )
                        # Field 4 of the full split is the FF type name
                        # inside the comment; [4:5] yields [] if missing,
                        # so the compare also guards against short lines.
                        assert (
                            out_gomc[i + 1 + j].split()[4:5] == mass_type_2[j]
                        )
            assert masses_read
    # test cif reader ETA psf writer outputs correct atom and residue numbering using non-orthoganol box
    def test_save_non_othoganol_box_psf(self):
        """Populate a triclinic ETV zeolite cell from a CIF file and verify
        the PSF atom section: 28 O atoms followed by 14 Si atoms, each row
        carrying the expected name, type letter, charge, and mass."""
        lattice_cif_ETV_triclinic = load_cif(
            file_or_path=get_fn("ETV_triclinic.cif")
        )
        ETV_triclinic = lattice_cif_ETV_triclinic.populate(x=1, y=1, z=1)
        ETV_triclinic.name = "ETV"
        charmm = Charmm(
            ETV_triclinic,
            "ETV_triclinic",
            ff_filename="ETV_triclinic_FF",
            forcefield_selection={
                ETV_triclinic.name: get_fn(
                    "Charmm_writer_testing_only_zeolite.xml"
                )
            },
            residues=[ETV_triclinic.name],
            bead_to_atom_name_dict=None,
            fix_residue=[ETV_triclinic.name],
        )
        charmm.write_psf()
        with open("ETV_triclinic.psf", "r") as fp:
            psf_read = False
            out_gomc = fp.readlines()
            for i, line in enumerate(out_gomc):
                # "42 !NATOM" marks the atom section: 28 O + 14 Si = 42.
                if "42 !NATOM" in line:
                    psf_read = True
                    no_O_atoms = 28
                    no_Si_atoms = 14
                    # Build the expected rows: atom index, segment, residue
                    # number, residue name, atom name, type letter, charge,
                    # mass -- first the O atoms, then the Si atoms.
                    atom_type_charge_etc_list = []
                    for f_i in range(0, no_O_atoms):
                        atom_type_charge_etc_list.append(
                            [
                                str(f_i + 1),
                                "SYS",
                                str(f_i + 1),
                                "ETV",
                                "O1",
                                "A",
                                "-0.400000",
                                "15.9994",
                            ],
                        )
                    for f_i in range(no_O_atoms, no_O_atoms + no_Si_atoms):
                        atom_type_charge_etc_list.append(
                            [
                                str(f_i + 1),
                                "SYS",
                                str(f_i + 1),
                                "ETV",
                                "Si1",
                                "B",
                                "0.800000",
                                "28.0855",
                            ],
                        )
                    # Each atom line after the header must match its
                    # expected first 8 whitespace-split fields.
                    for j in range(0, len(atom_type_charge_etc_list)):
                        assert (
                            out_gomc[i + 1 + j].split()[0:8]
                            == atom_type_charge_etc_list[j]
                        )
                else:
                    pass
        assert psf_read
    # test cif reader ETA pdb writer outputs correct atom and residue numbering using non-orthoganol box
    def test_save_non_othoganol_box_pdb(self):
        """Populate a triclinic ETV zeolite cell from a CIF file and verify
        the PDB output: the CRYST1 record carries the non-orthogonal cell
        (8.750 x 9.648 x 10.272, angles 105.72/100.19/97.02) and the ATOM
        records list 28 O atoms followed by 14 Si atoms."""
        lattice_cif_ETV_triclinic = load_cif(
            file_or_path=get_fn("ETV_triclinic.cif")
        )
        ETV_triclinic = lattice_cif_ETV_triclinic.populate(x=1, y=1, z=1)
        ETV_triclinic.name = "ETV"
        charmm = Charmm(
            ETV_triclinic,
            "ETV_triclinic",
            ff_filename="ETV_triclinic_FF",
            forcefield_selection={
                ETV_triclinic.name: get_fn(
                    "Charmm_writer_testing_only_zeolite.xml"
                )
            },
            residues=[ETV_triclinic.name],
            bead_to_atom_name_dict=None,
            fix_residue=[ETV_triclinic.name],
        )
        charmm.write_pdb()
        with open("ETV_triclinic.pdb", "r") as fp:
            pdb_read = False
            out_gomc = fp.readlines()
            for i, line in enumerate(out_gomc):
                if "CRYST1" in line:
                    pdb_read = True
                    crystal_box_length_angles = [
                        "CRYST1",
                        "8.750",
                        "9.648",
                        "10.272",
                        "105.72",
                        "100.19",
                        "97.02",
                    ]
                    no_O_atoms = 28
                    no_Si_atoms = 14
                    # Expected leading ATOM fields: record name, serial,
                    # atom name, residue name, chain, residue number.
                    atom_type_res_part_1_list = []
                    for f_i in range(0, no_O_atoms):
                        atom_type_res_part_1_list.append(
                            [
                                "ATOM",
                                str(f_i + 1),
                                "O1",
                                "ETV",
                                "A",
                                str(f_i + 1),
                            ]
                        )
                    for f_i in range(no_O_atoms, no_O_atoms + no_Si_atoms):
                        atom_type_res_part_1_list.append(
                            [
                                "ATOM",
                                str(f_i + 1),
                                "Si1",
                                "ETV",
                                "A",
                                str(f_i + 1),
                            ]
                        )
                    # Expected trailing fields (split indices 9-11).
                    # NOTE(review): the second "1.00" presumably reflects
                    # the fix_residue flag written by the Charmm writer --
                    # confirm against the writer before relying on this.
                    atom_type_res_part_2_list = []
                    for f_i in range(0, no_O_atoms):
                        atom_type_res_part_2_list.append(["1.00", "1.00", "O"])
                    for f_i in range(no_O_atoms, no_O_atoms + no_Si_atoms):
                        atom_type_res_part_2_list.append(["1.00", "1.00", "SI"])
                    assert out_gomc[i].split()[0:7] == crystal_box_length_angles
                    for j in range(0, len(atom_type_res_part_1_list)):
                        assert (
                            out_gomc[i + 1 + j].split()[0:6]
                            == atom_type_res_part_1_list[j]
                        )
                        assert (
                            out_gomc[i + 1 + j].split()[9:12]
                            == atom_type_res_part_2_list[j]
                        )
                else:
                    pass
        assert pdb_read
# test methane UA psf writer outputs correct atom and residue numbering using orthoganol box
def test_save_othoganol_methane_ua_psf(self):
methane = mb.Compound(name="MET")
methane_child_bead = mb.Compound(name="_CH4")
methane.add(methane_child_bead, inherit_periodicity=False)
methane_box = mb.fill_box(
compound=methane, n_compounds=4, box=[1, 1, 1]
)
charmm = Charmm(
methane_box,
"methane_box",
ff_filename="methane_box_FF",
forcefield_selection={methane.name: "trappe-ua"},
residues=[methane.name],
bead_to_atom_name_dict={"_CH4": "C"},
)
charmm.write_psf()
with open("methane_box.psf", "r") as fp:
psf_read = False
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "4 !NATOM" in line:
psf_read = True
no_methane_atoms = 4
atom_type_charge_etc_list = []
for f_i in range(0, no_methane_atoms):
atom_type_charge_etc_list.append(
[
str(f_i + 1),
"SYS",
str(f_i + 1),
"MET",
"C1",
"A",
"0.000000",
"16.0430",
],
)
for j in range(0, len(atom_type_charge_etc_list)):
assert (
out_gomc[i + 1 + j].split()[0:8]
== atom_type_charge_etc_list[j]
)
else:
pass
assert psf_read
    # test methane UA pdb writer outputs correct atom and residue numbering using orthoganol box
    def test_save_othoganol_methane_ua_pdb(self):
        """Fill an orthogonal 1x2x3 box with 10 united-atom methanes and
        verify the PDB CRYST1 record and the leading ATOM records."""
        methane = mb.Compound(name="MET")
        methane_child_bead = mb.Compound(name="_CH4")
        methane.add(methane_child_bead, inherit_periodicity=False)
        methane_box = mb.fill_box(
            compound=methane, n_compounds=10, box=[1, 2, 3]
        )
        charmm = Charmm(
            methane_box,
            "methane_box",
            ff_filename="methane_box_FF",
            forcefield_selection={methane.name: "trappe-ua"},
            residues=[methane.name],
            bead_to_atom_name_dict={"_CH4": "C"},
        )
        charmm.write_pdb()
        with open("methane_box.pdb", "r") as fp:
            pdb_read = False
            out_gomc = fp.readlines()
            for i, line in enumerate(out_gomc):
                if "CRYST1" in line:
                    pdb_read = True
                    # Box lengths [1, 2, 3] appear as 10/20/30 in CRYST1.
                    crystal_box_length_angles = [
                        "CRYST1",
                        "10.000",
                        "20.000",
                        "30.000",
                        "90.00",
                        "90.00",
                        "90.00",
                    ]
                    # NOTE(review): the box holds 10 methanes but only the
                    # first 4 ATOM records are checked here (possibly a
                    # leftover from the 4-molecule PSF test above) --
                    # confirm whether the remaining 6 should be verified.
                    no_methane_atoms = 4
                    atom_type_res_part_1_list = []
                    for f_i in range(0, no_methane_atoms):
                        atom_type_res_part_1_list.append(
                            [
                                "ATOM",
                                str(f_i + 1),
                                "C1",
                                "MET",
                                "A",
                                str(f_i + 1),
                            ]
                        )
                    atom_type_res_part_2_list = []
                    for f_i in range(0, no_methane_atoms):
                        atom_type_res_part_2_list.append(["1.00", "0.00", "EP"])
                    assert out_gomc[i].split()[0:7] == crystal_box_length_angles
                    for j in range(0, len(atom_type_res_part_1_list)):
                        assert (
                            out_gomc[i + 1 + j].split()[0:6]
                            == atom_type_res_part_1_list[j]
                        )
                        assert (
                            out_gomc[i + 1 + j].split()[9:12]
                            == atom_type_res_part_2_list[j]
                        )
                else:
                    pass
        assert pdb_read
| 38.449292
| 118
| 0.429873
| 13,816
| 135,726
| 3.910973
| 0.042053
| 0.036459
| 0.017026
| 0.016323
| 0.874838
| 0.837695
| 0.808806
| 0.775419
| 0.76339
| 0.728338
| 0
| 0.056815
| 0.463257
| 135,726
| 3,529
| 119
| 38.460187
| 0.6849
| 0.011995
| 0
| 0.681016
| 0
| 0.001858
| 0.128588
| 0.015417
| 0
| 0
| 0
| 0
| 0.052338
| 1
| 0.028182
| false
| 0.008052
| 0.004026
| 0
| 0.032518
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
622fead6007f4bfb48f9c867f7eb64dfed600fad
| 178
|
py
|
Python
|
openslides_backend/action/actions/assignment_option/delete.py
|
r-peschke/openslides-backend
|
83d0dab68bb914f06a0f50cffe23fc10ca45376f
|
[
"MIT"
] | null | null | null |
openslides_backend/action/actions/assignment_option/delete.py
|
r-peschke/openslides-backend
|
83d0dab68bb914f06a0f50cffe23fc10ca45376f
|
[
"MIT"
] | null | null | null |
openslides_backend/action/actions/assignment_option/delete.py
|
r-peschke/openslides-backend
|
83d0dab68bb914f06a0f50cffe23fc10ca45376f
|
[
"MIT"
] | null | null | null |
from ...action import DummyAction
from ...util.register import register_action
@register_action("assignment_option.delete")
class AssignmentOptionDelete(DummyAction):
pass
| 22.25
| 44
| 0.808989
| 19
| 178
| 7.421053
| 0.631579
| 0.198582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101124
| 178
| 7
| 45
| 25.428571
| 0.88125
| 0
| 0
| 0
| 0
| 0
| 0.134831
| 0.134831
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
627fb42c5f10f3807ff7f2c93534971d52aeba1e
| 167
|
py
|
Python
|
game/game_code/interactions/__init__.py
|
RDGT/adventure-poc
|
e211491e5958d12a3347b3e279006d915d691d20
|
[
"MIT"
] | 2
|
2018-04-23T15:03:41.000Z
|
2018-07-18T06:36:51.000Z
|
game/game_code/interactions/__init__.py
|
RDGT/adventure-poc
|
e211491e5958d12a3347b3e279006d915d691d20
|
[
"MIT"
] | 6
|
2018-03-25T12:04:27.000Z
|
2018-09-14T09:08:34.000Z
|
game/game_code/interactions/__init__.py
|
RDGT/adventure-poc
|
e211491e5958d12a3347b3e279006d915d691d20
|
[
"MIT"
] | 1
|
2018-07-22T09:46:55.000Z
|
2018-07-22T09:46:55.000Z
|
import room
import level
import thing
import menu
from lib import scene
from lib import choices
from lib import conditions
from lib import events
from lib import area
| 16.7
| 26
| 0.832335
| 28
| 167
| 4.964286
| 0.428571
| 0.251799
| 0.467626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167665
| 167
| 9
| 27
| 18.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
659ec21c23b8784310774cd127a38d06b3d2dd9d
| 22
|
py
|
Python
|
minesweeper/__init__.py
|
andreasisnes/Elitekollektivet.Minesweeper.Sprites
|
f3633fdf1d95763a7a7e7396c021012c68b97d49
|
[
"MIT"
] | 1
|
2020-10-26T23:23:56.000Z
|
2020-10-26T23:23:56.000Z
|
minesweeper/__init__.py
|
andreasisnes/Elitekollektivet.Minesweeper.Sprites
|
f3633fdf1d95763a7a7e7396c021012c68b97d49
|
[
"MIT"
] | null | null | null |
minesweeper/__init__.py
|
andreasisnes/Elitekollektivet.Minesweeper.Sprites
|
f3633fdf1d95763a7a7e7396c021012c68b97d49
|
[
"MIT"
] | 1
|
2021-12-19T17:23:30.000Z
|
2021-12-19T17:23:30.000Z
|
from . import sprites
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
65eeba2d7a47903d512bd6f724e681601b002c44
| 196
|
py
|
Python
|
deep_rl/agent/exploration/exploration_strategy.py
|
df424/deep_rl
|
bfe4a5f54df38ec111fb0162fd575c668f9310d0
|
[
"MIT"
] | null | null | null |
deep_rl/agent/exploration/exploration_strategy.py
|
df424/deep_rl
|
bfe4a5f54df38ec111fb0162fd575c668f9310d0
|
[
"MIT"
] | null | null | null |
deep_rl/agent/exploration/exploration_strategy.py
|
df424/deep_rl
|
bfe4a5f54df38ec111fb0162fd575c668f9310d0
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
import numpy as np
class ExplorationStrategy(ABC):
@abstractmethod
def pick(self, action_space: np.ndarray, eval_mode:bool=False) -> int:
pass
| 24.5
| 74
| 0.729592
| 26
| 196
| 5.423077
| 0.807692
| 0.241135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188776
| 196
| 8
| 75
| 24.5
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
02a0f198b200f1876b837ddfdb6c1b0d2726d713
| 328
|
py
|
Python
|
src/perimeterator/enumerator/__init__.py
|
darkarnium/perimeterator
|
8c694267d92ca1d28fc1494cd9394af34271ed39
|
[
"MIT"
] | 56
|
2019-03-20T01:44:04.000Z
|
2022-02-16T13:36:39.000Z
|
src/perimeterator/enumerator/__init__.py
|
darkarnium/perimeterator
|
8c694267d92ca1d28fc1494cd9394af34271ed39
|
[
"MIT"
] | 1
|
2020-07-08T20:30:23.000Z
|
2020-11-07T15:41:25.000Z
|
src/perimeterator/enumerator/__init__.py
|
darkarnium/perimeterator
|
8c694267d92ca1d28fc1494cd9394af34271ed39
|
[
"MIT"
] | 9
|
2019-10-09T18:54:52.000Z
|
2021-12-28T15:27:58.000Z
|
''' Perimeterator - Enumerators. '''
from perimeterator.enumerator import ec2 # noqa: F401
from perimeterator.enumerator import elb # noqa: F401
from perimeterator.enumerator import elbv2 # noqa: F401
from perimeterator.enumerator import rds # noqa: F401
from perimeterator.enumerator import es # noqa: F401
| 41
| 57
| 0.746951
| 37
| 328
| 6.621622
| 0.324324
| 0.346939
| 0.55102
| 0.673469
| 0.669388
| 0.669388
| 0
| 0
| 0
| 0
| 0
| 0.063433
| 0.182927
| 328
| 7
| 58
| 46.857143
| 0.850746
| 0.259146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
02dea7c663a736d4861a9fb5d6267cda2989779f
| 73
|
py
|
Python
|
flattener/tests/test_solidity_flattener.py
|
XertroV/solidity-flattener
|
53b01d1db12a5ef4dfe35adf4bd6e4f73d90d0f8
|
[
"MIT"
] | 266
|
2017-08-08T05:27:40.000Z
|
2022-03-23T01:39:47.000Z
|
flattener/tests/test_solidity_flattener.py
|
XertroV/solidity-flattener
|
53b01d1db12a5ef4dfe35adf4bd6e4f73d90d0f8
|
[
"MIT"
] | 32
|
2017-08-17T10:22:10.000Z
|
2022-01-30T11:51:09.000Z
|
flattener/tests/test_solidity_flattener.py
|
XertroV/solidity-flattener
|
53b01d1db12a5ef4dfe35adf4bd6e4f73d90d0f8
|
[
"MIT"
] | 91
|
2017-09-18T03:16:27.000Z
|
2021-09-21T15:42:18.000Z
|
import pytest
from .. import core
def test_thingy():
assert 1 == 1
| 10.428571
| 19
| 0.657534
| 11
| 73
| 4.272727
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.246575
| 73
| 6
| 20
| 12.166667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.